| 1 | //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | /// \file |
| 10 | /// Custom DAG lowering for SI |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
| 14 | #include "SIISelLowering.h" |
| 15 | #include "AMDGPU.h" |
| 16 | #include "AMDGPUInstrInfo.h" |
| 17 | #include "AMDGPUTargetMachine.h" |
| 18 | #include "SIMachineFunctionInfo.h" |
| 19 | #include "SIRegisterInfo.h" |
| 20 | #include "llvm/ADT/Statistic.h" |
| 21 | #include "llvm/Analysis/LegacyDivergenceAnalysis.h" |
| 22 | #include "llvm/CodeGen/Analysis.h" |
| 23 | #include "llvm/CodeGen/FunctionLoweringInfo.h" |
| 24 | #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" |
| 25 | #include "llvm/CodeGen/MachineLoopInfo.h" |
| 26 | #include "llvm/IR/DiagnosticInfo.h" |
| 27 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
| 28 | #include "llvm/IR/IntrinsicsR600.h" |
| 29 | #include "llvm/Support/CommandLine.h" |
| 30 | #include "llvm/Support/KnownBits.h" |
| 31 | |
| 32 | using namespace llvm; |
| 33 | |
| 34 | #define DEBUG_TYPE "si-lower" |
| 35 | |
STATISTIC(NumTailCalls, "Number of tail calls");
| 37 | |
static cl::opt<bool> DisableLoopAlignment(
  "amdgpu-disable-loop-alignment",
  cl::desc("Do not align and prefetch loops"),
  cl::init(false));
| 42 | |
static cl::opt<bool> VGPRReserveforSGPRSpill(
  "amdgpu-reserve-vgpr-for-sgpr-spill",
  cl::desc("Allocates one VGPR for future SGPR spills"), cl::init(true));
| 46 | |
static cl::opt<bool> UseDivergentRegisterIndexing(
  "amdgpu-use-divergent-register-indexing",
  cl::Hidden,
  cl::desc("Use indirect register addressing for divergent indexes"),
  cl::init(false));
| 52 | |
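// Helpers for querying the function's floating-point denormal mode. Several
// folding decisions below (see isFPExtFoldable) are only safe when denormals
// are flushed.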
| 53 | static bool hasFP32Denormals(const MachineFunction &MF) { |
| 54 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 55 | return Info->getMode().allFP32Denormals(); |
| 56 | } |
| 57 | |
| 58 | static bool hasFP64FP16Denormals(const MachineFunction &MF) { |
| 59 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 60 | return Info->getMode().allFP64FP16Denormals(); |
| 61 | } |
| 62 | |
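// Return the first SGPR not already allocated by the calling convention state,
// e.g. for placing additional implicit arguments; fails with llvm_unreachable
// if every SGPR is taken.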
| 63 | static unsigned findFirstFreeSGPR(CCState &CCInfo) { |
| 64 | unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); |
| 65 | for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) { |
| 66 | if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) { |
| 67 | return AMDGPU::SGPR0 + Reg; |
| 68 | } |
| 69 | } |
| 70 | llvm_unreachable("Cannot allocate sgpr" ); |
| 71 | } |
| 72 | |
| 73 | SITargetLowering::SITargetLowering(const TargetMachine &TM, |
| 74 | const GCNSubtarget &STI) |
| 75 | : AMDGPUTargetLowering(TM, STI), |
| 76 | Subtarget(&STI) { |
| 77 | addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); |
| 78 | addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); |
| 79 | |
| 80 | addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass); |
| 81 | addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass); |
| 82 | |
| 83 | addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass); |
| 84 | addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass); |
| 85 | addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass); |
| 86 | |
| 87 | addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass); |
| 88 | addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass); |
| 89 | |
| 90 | addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass); |
| 91 | addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass); |
| 92 | |
| 93 | addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass); |
| 94 | addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass); |
| 95 | |
| 96 | addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass); |
| 97 | addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass); |
| 98 | |
| 99 | addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass); |
| 100 | addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass); |
| 101 | |
| 102 | addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass); |
| 103 | addRegisterClass(MVT::v4f64, &AMDGPU::VReg_256RegClass); |
| 104 | |
| 105 | addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass); |
| 106 | addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); |
| 107 | |
| 108 | addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass); |
| 109 | addRegisterClass(MVT::v8f64, &AMDGPU::VReg_512RegClass); |
| 110 | |
| 111 | addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass); |
| 112 | addRegisterClass(MVT::v16f64, &AMDGPU::VReg_1024RegClass); |
| 113 | |
| 114 | if (Subtarget->has16BitInsts()) { |
| 115 | addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass); |
| 116 | addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass); |
| 117 | |
    // Unless there are also VOP3P operations, no operations on these types are
    // really legal.
| 119 | addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass); |
| 120 | addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass); |
| 121 | addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass); |
| 122 | addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass); |
| 123 | } |
| 124 | |
| 125 | addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass); |
| 126 | addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass); |
| 127 | |
| 128 | computeRegisterProperties(Subtarget->getRegisterInfo()); |
| 129 | |
| 130 | // The boolean content concept here is too inflexible. Compares only ever |
| 131 | // really produce a 1-bit result. Any copy/extend from these will turn into a |
| 132 | // select, and zext/1 or sext/-1 are equally cheap. Arbitrarily choose 0/1, as |
| 133 | // it's what most targets use. |
| 134 | setBooleanContents(ZeroOrOneBooleanContent); |
| 135 | setBooleanVectorContents(ZeroOrOneBooleanContent); |
| 136 | |
| 137 | // We need to custom lower vector stores from local memory |
| 138 | setOperationAction(ISD::LOAD, MVT::v2i32, Custom); |
| 139 | setOperationAction(ISD::LOAD, MVT::v3i32, Custom); |
| 140 | setOperationAction(ISD::LOAD, MVT::v4i32, Custom); |
| 141 | setOperationAction(ISD::LOAD, MVT::v5i32, Custom); |
| 142 | setOperationAction(ISD::LOAD, MVT::v8i32, Custom); |
| 143 | setOperationAction(ISD::LOAD, MVT::v16i32, Custom); |
| 144 | setOperationAction(ISD::LOAD, MVT::i1, Custom); |
| 145 | setOperationAction(ISD::LOAD, MVT::v32i32, Custom); |
| 146 | |
| 147 | setOperationAction(ISD::STORE, MVT::v2i32, Custom); |
| 148 | setOperationAction(ISD::STORE, MVT::v3i32, Custom); |
| 149 | setOperationAction(ISD::STORE, MVT::v4i32, Custom); |
| 150 | setOperationAction(ISD::STORE, MVT::v5i32, Custom); |
| 151 | setOperationAction(ISD::STORE, MVT::v8i32, Custom); |
| 152 | setOperationAction(ISD::STORE, MVT::v16i32, Custom); |
| 153 | setOperationAction(ISD::STORE, MVT::i1, Custom); |
| 154 | setOperationAction(ISD::STORE, MVT::v32i32, Custom); |
| 155 | |
| 156 | setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); |
| 157 | setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand); |
| 158 | setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand); |
| 159 | setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand); |
| 160 | setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand); |
| 161 | setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand); |
| 162 | setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand); |
| 163 | setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand); |
| 164 | setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand); |
| 165 | setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand); |
| 166 | setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand); |
| 167 | setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand); |
| 168 | setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand); |
| 169 | setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand); |
| 170 | setTruncStoreAction(MVT::v16i16, MVT::v16i8, Expand); |
| 171 | setTruncStoreAction(MVT::v32i16, MVT::v32i8, Expand); |
| 172 | |
| 173 | setTruncStoreAction(MVT::v4i64, MVT::v4i8, Expand); |
| 174 | setTruncStoreAction(MVT::v8i64, MVT::v8i8, Expand); |
| 175 | setTruncStoreAction(MVT::v8i64, MVT::v8i16, Expand); |
| 176 | setTruncStoreAction(MVT::v8i64, MVT::v8i32, Expand); |
| 177 | setTruncStoreAction(MVT::v16i64, MVT::v16i32, Expand); |
| 178 | |
| 179 | setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); |
| 180 | setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); |
| 181 | |
| 182 | setOperationAction(ISD::SELECT, MVT::i1, Promote); |
| 183 | setOperationAction(ISD::SELECT, MVT::i64, Custom); |
| 184 | setOperationAction(ISD::SELECT, MVT::f64, Promote); |
| 185 | AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); |
| 186 | |
| 187 | setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); |
| 188 | setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); |
| 189 | setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); |
| 190 | setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); |
| 191 | setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); |
| 192 | |
| 193 | setOperationAction(ISD::SETCC, MVT::i1, Promote); |
| 194 | setOperationAction(ISD::SETCC, MVT::v2i1, Expand); |
| 195 | setOperationAction(ISD::SETCC, MVT::v4i1, Expand); |
| 196 | AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); |
| 197 | |
| 198 | setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); |
| 199 | setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); |
| 200 | setOperationAction(ISD::TRUNCATE, MVT::v4i32, Expand); |
| 201 | setOperationAction(ISD::FP_ROUND, MVT::v4f32, Expand); |
| 202 | setOperationAction(ISD::TRUNCATE, MVT::v8i32, Expand); |
| 203 | setOperationAction(ISD::FP_ROUND, MVT::v8f32, Expand); |
| 204 | setOperationAction(ISD::TRUNCATE, MVT::v16i32, Expand); |
| 205 | setOperationAction(ISD::FP_ROUND, MVT::v16f32, Expand); |
| 206 | |
| 207 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); |
| 208 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); |
| 209 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); |
| 210 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); |
| 211 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); |
| 212 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v3i16, Custom); |
| 213 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); |
| 214 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); |
| 215 | |
| 216 | setOperationAction(ISD::BRCOND, MVT::Other, Custom); |
| 217 | setOperationAction(ISD::BR_CC, MVT::i1, Expand); |
| 218 | setOperationAction(ISD::BR_CC, MVT::i32, Expand); |
| 219 | setOperationAction(ISD::BR_CC, MVT::i64, Expand); |
| 220 | setOperationAction(ISD::BR_CC, MVT::f32, Expand); |
| 221 | setOperationAction(ISD::BR_CC, MVT::f64, Expand); |
| 222 | |
| 223 | setOperationAction(ISD::UADDO, MVT::i32, Legal); |
| 224 | setOperationAction(ISD::USUBO, MVT::i32, Legal); |
| 225 | |
| 226 | setOperationAction(ISD::ADDCARRY, MVT::i32, Legal); |
| 227 | setOperationAction(ISD::SUBCARRY, MVT::i32, Legal); |
| 228 | |
| 229 | setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); |
| 230 | setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); |
| 231 | setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); |
| 232 | |
| 233 | #if 0 |
| 234 | setOperationAction(ISD::ADDCARRY, MVT::i64, Legal); |
| 235 | setOperationAction(ISD::SUBCARRY, MVT::i64, Legal); |
| 236 | #endif |
| 237 | |
| 238 | // We only support LOAD/STORE and vector manipulation ops for vectors |
| 239 | // with > 4 elements. |
| 240 | for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, |
| 241 | MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16, |
| 242 | MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64, |
| 243 | MVT::v16i64, MVT::v16f64, MVT::v32i32, MVT::v32f32 }) { |
| 244 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { |
| 245 | switch (Op) { |
| 246 | case ISD::LOAD: |
| 247 | case ISD::STORE: |
| 248 | case ISD::BUILD_VECTOR: |
| 249 | case ISD::BITCAST: |
| 250 | case ISD::EXTRACT_VECTOR_ELT: |
| 251 | case ISD::INSERT_VECTOR_ELT: |
| 252 | case ISD::INSERT_SUBVECTOR: |
| 253 | case ISD::EXTRACT_SUBVECTOR: |
| 254 | case ISD::SCALAR_TO_VECTOR: |
| 255 | break; |
| 256 | case ISD::CONCAT_VECTORS: |
| 257 | setOperationAction(Op, VT, Custom); |
| 258 | break; |
| 259 | default: |
| 260 | setOperationAction(Op, VT, Expand); |
| 261 | break; |
| 262 | } |
| 263 | } |
| 264 | } |
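  // As a result, e.g. ISD::ADD on MVT::v8i32 is Expand (it gets scalarized),
  // while CONCAT_VECTORS is custom-lowered and the load/store and vector
  // manipulation operations listed in the switch are left untouched here.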
| 265 | |
| 266 | setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand); |
| 267 | |
| 268 | // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that |
| 269 | // is expanded to avoid having two separate loops in case the index is a VGPR. |
| 270 | |
| 271 | // Most operations are naturally 32-bit vector operations. We only support |
| 272 | // load and store of i64 vectors, so promote v2i64 vector operations to v4i32. |
| 273 | for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) { |
| 274 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); |
| 275 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32); |
| 276 | |
| 277 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); |
| 278 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32); |
| 279 | |
| 280 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); |
| 281 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32); |
| 282 | |
| 283 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); |
| 284 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32); |
| 285 | } |
| 286 | |
| 287 | for (MVT Vec64 : { MVT::v4i64, MVT::v4f64 }) { |
| 288 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); |
| 289 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32); |
| 290 | |
| 291 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); |
| 292 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v8i32); |
| 293 | |
| 294 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); |
| 295 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v8i32); |
| 296 | |
| 297 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); |
| 298 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v8i32); |
| 299 | } |
| 300 | |
| 301 | for (MVT Vec64 : { MVT::v8i64, MVT::v8f64 }) { |
| 302 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); |
| 303 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32); |
| 304 | |
| 305 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); |
| 306 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v16i32); |
| 307 | |
| 308 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); |
| 309 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v16i32); |
| 310 | |
| 311 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); |
| 312 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v16i32); |
| 313 | } |
| 314 | |
| 315 | for (MVT Vec64 : { MVT::v16i64, MVT::v16f64 }) { |
| 316 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); |
| 317 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v32i32); |
| 318 | |
| 319 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); |
| 320 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v32i32); |
| 321 | |
| 322 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); |
| 323 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v32i32); |
| 324 | |
| 325 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); |
| 326 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v32i32); |
| 327 | } |
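  // Roughly, each promotion above performs the operation in the corresponding
  // 32-bit vector type and bitcasts the result back, e.g. a v4i64 build_vector
  // is built as v8i32 and then bitcast to v4i64.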
| 328 | |
| 329 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); |
| 330 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); |
| 331 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); |
| 332 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); |
| 333 | |
| 334 | setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom); |
| 335 | setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom); |
| 336 | |
| 337 | // Avoid stack access for these. |
| 338 | // TODO: Generalize to more vector types. |
| 339 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom); |
| 340 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom); |
| 341 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom); |
| 342 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom); |
| 343 | |
| 344 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); |
| 345 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); |
| 346 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom); |
| 347 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom); |
| 348 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom); |
| 349 | |
| 350 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom); |
| 351 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom); |
| 352 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom); |
| 353 | |
| 354 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom); |
| 355 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom); |
| 356 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom); |
| 357 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom); |
| 358 | |
| 359 | // Deal with vec3 vector operations when widened to vec4. |
| 360 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom); |
| 361 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom); |
| 362 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom); |
| 363 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom); |
| 364 | |
| 365 | // Deal with vec5 vector operations when widened to vec8. |
| 366 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom); |
| 367 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom); |
| 368 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom); |
| 369 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom); |
| 370 | |
  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling and
  // output demarshalling.
| 373 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); |
| 374 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); |
| 375 | |
| 376 | // We can't return success/failure, only the old value, |
| 377 | // let LLVM add the comparison |
| 378 | setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand); |
| 379 | setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand); |
| 380 | |
| 381 | if (Subtarget->hasFlatAddressSpace()) { |
| 382 | setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); |
| 383 | setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); |
| 384 | } |
| 385 | |
| 386 | setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); |
| 387 | |
| 388 | // FIXME: This should be narrowed to i32, but that only happens if i64 is |
| 389 | // illegal. |
| 390 | // FIXME: Should lower sub-i32 bswaps to bit-ops without v_perm_b32. |
| 391 | setOperationAction(ISD::BSWAP, MVT::i64, Legal); |
| 392 | setOperationAction(ISD::BSWAP, MVT::i32, Legal); |
| 393 | |
  // On SI this is s_memtime; on VI it is s_memrealtime.
| 395 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); |
| 396 | setOperationAction(ISD::TRAP, MVT::Other, Custom); |
| 397 | setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom); |
| 398 | |
| 399 | if (Subtarget->has16BitInsts()) { |
| 400 | setOperationAction(ISD::FPOW, MVT::f16, Promote); |
| 401 | setOperationAction(ISD::FPOWI, MVT::f16, Promote); |
| 402 | setOperationAction(ISD::FLOG, MVT::f16, Custom); |
| 403 | setOperationAction(ISD::FEXP, MVT::f16, Custom); |
| 404 | setOperationAction(ISD::FLOG10, MVT::f16, Custom); |
| 405 | } |
| 406 | |
| 407 | if (Subtarget->hasMadMacF32Insts()) |
| 408 | setOperationAction(ISD::FMAD, MVT::f32, Legal); |
| 409 | |
| 410 | if (!Subtarget->hasBFI()) { |
| 411 | // fcopysign can be done in a single instruction with BFI. |
| 412 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); |
| 413 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); |
| 414 | } |
| 415 | |
| 416 | if (!Subtarget->hasBCNT(32)) |
| 417 | setOperationAction(ISD::CTPOP, MVT::i32, Expand); |
| 418 | |
| 419 | if (!Subtarget->hasBCNT(64)) |
| 420 | setOperationAction(ISD::CTPOP, MVT::i64, Expand); |
| 421 | |
| 422 | if (Subtarget->hasFFBH()) |
| 423 | setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); |
| 424 | |
| 425 | if (Subtarget->hasFFBL()) |
| 426 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom); |
| 427 | |
| 428 | // We only really have 32-bit BFE instructions (and 16-bit on VI). |
| 429 | // |
| 430 | // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any |
| 431 | // effort to match them now. We want this to be false for i64 cases when the |
| 432 | // extraction isn't restricted to the upper or lower half. Ideally we would |
| 433 | // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that |
| 434 | // span the midpoint are probably relatively rare, so don't worry about them |
| 435 | // for now. |
| 436 | if (Subtarget->hasBFE()) |
| 437 | setHasExtractBitsInsn(true); |
| 438 | |
| 439 | // Clamp modifier on add/sub |
| 440 | if (Subtarget->hasIntClamp()) { |
| 441 | setOperationAction(ISD::UADDSAT, MVT::i32, Legal); |
| 442 | setOperationAction(ISD::USUBSAT, MVT::i32, Legal); |
| 443 | } |
| 444 | |
| 445 | if (Subtarget->hasAddNoCarry()) { |
| 446 | setOperationAction(ISD::SADDSAT, MVT::i16, Legal); |
| 447 | setOperationAction(ISD::SSUBSAT, MVT::i16, Legal); |
| 448 | setOperationAction(ISD::SADDSAT, MVT::i32, Legal); |
| 449 | setOperationAction(ISD::SSUBSAT, MVT::i32, Legal); |
| 450 | } |
| 451 | |
| 452 | setOperationAction(ISD::FMINNUM, MVT::f32, Custom); |
| 453 | setOperationAction(ISD::FMAXNUM, MVT::f32, Custom); |
| 454 | setOperationAction(ISD::FMINNUM, MVT::f64, Custom); |
| 455 | setOperationAction(ISD::FMAXNUM, MVT::f64, Custom); |
| 456 | |
| 458 | // These are really only legal for ieee_mode functions. We should be avoiding |
| 459 | // them for functions that don't have ieee_mode enabled, so just say they are |
| 460 | // legal. |
| 461 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal); |
| 462 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal); |
| 463 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal); |
| 464 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal); |
| 465 | |
| 467 | if (Subtarget->haveRoundOpsF64()) { |
| 468 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); |
| 469 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); |
| 470 | setOperationAction(ISD::FRINT, MVT::f64, Legal); |
| 471 | } else { |
| 472 | setOperationAction(ISD::FCEIL, MVT::f64, Custom); |
| 473 | setOperationAction(ISD::FTRUNC, MVT::f64, Custom); |
| 474 | setOperationAction(ISD::FRINT, MVT::f64, Custom); |
| 475 | setOperationAction(ISD::FFLOOR, MVT::f64, Custom); |
| 476 | } |
| 477 | |
| 478 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); |
| 479 | |
| 480 | setOperationAction(ISD::FSIN, MVT::f32, Custom); |
| 481 | setOperationAction(ISD::FCOS, MVT::f32, Custom); |
| 482 | setOperationAction(ISD::FDIV, MVT::f32, Custom); |
| 483 | setOperationAction(ISD::FDIV, MVT::f64, Custom); |
| 484 | |
| 485 | if (Subtarget->has16BitInsts()) { |
| 486 | setOperationAction(ISD::Constant, MVT::i16, Legal); |
| 487 | |
| 488 | setOperationAction(ISD::SMIN, MVT::i16, Legal); |
| 489 | setOperationAction(ISD::SMAX, MVT::i16, Legal); |
| 490 | |
| 491 | setOperationAction(ISD::UMIN, MVT::i16, Legal); |
| 492 | setOperationAction(ISD::UMAX, MVT::i16, Legal); |
| 493 | |
| 494 | setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote); |
| 495 | AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32); |
| 496 | |
| 497 | setOperationAction(ISD::ROTR, MVT::i16, Expand); |
| 498 | setOperationAction(ISD::ROTL, MVT::i16, Expand); |
| 499 | |
| 500 | setOperationAction(ISD::SDIV, MVT::i16, Promote); |
| 501 | setOperationAction(ISD::UDIV, MVT::i16, Promote); |
| 502 | setOperationAction(ISD::SREM, MVT::i16, Promote); |
| 503 | setOperationAction(ISD::UREM, MVT::i16, Promote); |
| 504 | setOperationAction(ISD::UADDSAT, MVT::i16, Legal); |
| 505 | setOperationAction(ISD::USUBSAT, MVT::i16, Legal); |
| 506 | |
| 507 | setOperationAction(ISD::BITREVERSE, MVT::i16, Promote); |
| 508 | |
| 509 | setOperationAction(ISD::CTTZ, MVT::i16, Promote); |
| 510 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote); |
| 511 | setOperationAction(ISD::CTLZ, MVT::i16, Promote); |
| 512 | setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote); |
| 513 | setOperationAction(ISD::CTPOP, MVT::i16, Promote); |
| 514 | |
| 515 | setOperationAction(ISD::SELECT_CC, MVT::i16, Expand); |
| 516 | |
| 517 | setOperationAction(ISD::BR_CC, MVT::i16, Expand); |
| 518 | |
| 519 | setOperationAction(ISD::LOAD, MVT::i16, Custom); |
| 520 | |
| 521 | setTruncStoreAction(MVT::i64, MVT::i16, Expand); |
| 522 | |
| 523 | setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote); |
| 524 | AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32); |
| 525 | setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote); |
| 526 | AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32); |
| 527 | |
| 528 | setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); |
| 529 | setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); |
| 530 | |
| 531 | // F16 - Constant Actions. |
| 532 | setOperationAction(ISD::ConstantFP, MVT::f16, Legal); |
| 533 | |
| 534 | // F16 - Load/Store Actions. |
| 535 | setOperationAction(ISD::LOAD, MVT::f16, Promote); |
| 536 | AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16); |
| 537 | setOperationAction(ISD::STORE, MVT::f16, Promote); |
| 538 | AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16); |
| 539 | |
| 540 | // F16 - VOP1 Actions. |
| 541 | setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); |
| 542 | setOperationAction(ISD::FCOS, MVT::f16, Custom); |
| 543 | setOperationAction(ISD::FSIN, MVT::f16, Custom); |
| 544 | |
| 545 | setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom); |
| 546 | setOperationAction(ISD::UINT_TO_FP, MVT::i16, Custom); |
| 547 | |
| 548 | setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote); |
| 549 | setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote); |
| 550 | setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote); |
| 551 | setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote); |
| 552 | setOperationAction(ISD::FROUND, MVT::f16, Custom); |
| 553 | |
| 554 | // F16 - VOP2 Actions. |
| 555 | setOperationAction(ISD::BR_CC, MVT::f16, Expand); |
| 556 | setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); |
| 557 | |
| 558 | setOperationAction(ISD::FDIV, MVT::f16, Custom); |
| 559 | |
| 560 | // F16 - VOP3 Actions. |
| 561 | setOperationAction(ISD::FMA, MVT::f16, Legal); |
| 562 | if (STI.hasMadF16()) |
| 563 | setOperationAction(ISD::FMAD, MVT::f16, Legal); |
| 564 | |
| 565 | for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) { |
| 566 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { |
| 567 | switch (Op) { |
| 568 | case ISD::LOAD: |
| 569 | case ISD::STORE: |
| 570 | case ISD::BUILD_VECTOR: |
| 571 | case ISD::BITCAST: |
| 572 | case ISD::EXTRACT_VECTOR_ELT: |
| 573 | case ISD::INSERT_VECTOR_ELT: |
| 574 | case ISD::INSERT_SUBVECTOR: |
| 575 | case ISD::EXTRACT_SUBVECTOR: |
| 576 | case ISD::SCALAR_TO_VECTOR: |
| 577 | break; |
| 578 | case ISD::CONCAT_VECTORS: |
| 579 | setOperationAction(Op, VT, Custom); |
| 580 | break; |
| 581 | default: |
| 582 | setOperationAction(Op, VT, Expand); |
| 583 | break; |
| 584 | } |
| 585 | } |
| 586 | } |
| 587 | |
| 588 | // v_perm_b32 can handle either of these. |
| 589 | setOperationAction(ISD::BSWAP, MVT::i16, Legal); |
| 590 | setOperationAction(ISD::BSWAP, MVT::v2i16, Legal); |
| 591 | setOperationAction(ISD::BSWAP, MVT::v4i16, Custom); |
| 592 | |
| 593 | // XXX - Do these do anything? Vector constants turn into build_vector. |
| 594 | setOperationAction(ISD::Constant, MVT::v2i16, Legal); |
| 595 | setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal); |
| 596 | |
| 597 | setOperationAction(ISD::UNDEF, MVT::v2i16, Legal); |
| 598 | setOperationAction(ISD::UNDEF, MVT::v2f16, Legal); |
| 599 | |
| 600 | setOperationAction(ISD::STORE, MVT::v2i16, Promote); |
| 601 | AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32); |
| 602 | setOperationAction(ISD::STORE, MVT::v2f16, Promote); |
| 603 | AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32); |
| 604 | |
| 605 | setOperationAction(ISD::LOAD, MVT::v2i16, Promote); |
| 606 | AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32); |
| 607 | setOperationAction(ISD::LOAD, MVT::v2f16, Promote); |
| 608 | AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32); |
| 609 | |
| 610 | setOperationAction(ISD::AND, MVT::v2i16, Promote); |
| 611 | AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32); |
| 612 | setOperationAction(ISD::OR, MVT::v2i16, Promote); |
| 613 | AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32); |
| 614 | setOperationAction(ISD::XOR, MVT::v2i16, Promote); |
| 615 | AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32); |
| 616 | |
| 617 | setOperationAction(ISD::LOAD, MVT::v4i16, Promote); |
| 618 | AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32); |
| 619 | setOperationAction(ISD::LOAD, MVT::v4f16, Promote); |
| 620 | AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32); |
| 621 | |
| 622 | setOperationAction(ISD::STORE, MVT::v4i16, Promote); |
| 623 | AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32); |
| 624 | setOperationAction(ISD::STORE, MVT::v4f16, Promote); |
| 625 | AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32); |
| 626 | |
| 627 | setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand); |
| 628 | setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand); |
| 629 | setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand); |
| 630 | setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand); |
| 631 | |
| 632 | setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand); |
| 633 | setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand); |
| 634 | setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand); |
| 635 | |
| 636 | if (!Subtarget->hasVOP3PInsts()) { |
| 637 | setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom); |
| 638 | setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); |
| 639 | } |
| 640 | |
| 641 | setOperationAction(ISD::FNEG, MVT::v2f16, Legal); |
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns).
| 644 | setOperationAction(ISD::FABS, MVT::v2f16, Legal); |
| 645 | |
| 646 | setOperationAction(ISD::FMAXNUM, MVT::f16, Custom); |
| 647 | setOperationAction(ISD::FMINNUM, MVT::f16, Custom); |
| 648 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal); |
| 649 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal); |
| 650 | |
| 651 | setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom); |
| 652 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom); |
| 653 | |
| 654 | setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand); |
| 655 | setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand); |
| 656 | } |
| 657 | |
| 658 | if (Subtarget->hasVOP3PInsts()) { |
| 659 | setOperationAction(ISD::ADD, MVT::v2i16, Legal); |
| 660 | setOperationAction(ISD::SUB, MVT::v2i16, Legal); |
| 661 | setOperationAction(ISD::MUL, MVT::v2i16, Legal); |
| 662 | setOperationAction(ISD::SHL, MVT::v2i16, Legal); |
| 663 | setOperationAction(ISD::SRL, MVT::v2i16, Legal); |
| 664 | setOperationAction(ISD::SRA, MVT::v2i16, Legal); |
| 665 | setOperationAction(ISD::SMIN, MVT::v2i16, Legal); |
| 666 | setOperationAction(ISD::UMIN, MVT::v2i16, Legal); |
| 667 | setOperationAction(ISD::SMAX, MVT::v2i16, Legal); |
| 668 | setOperationAction(ISD::UMAX, MVT::v2i16, Legal); |
| 669 | |
| 670 | setOperationAction(ISD::UADDSAT, MVT::v2i16, Legal); |
| 671 | setOperationAction(ISD::USUBSAT, MVT::v2i16, Legal); |
| 672 | setOperationAction(ISD::SADDSAT, MVT::v2i16, Legal); |
| 673 | setOperationAction(ISD::SSUBSAT, MVT::v2i16, Legal); |
| 674 | |
| 675 | setOperationAction(ISD::FADD, MVT::v2f16, Legal); |
| 676 | setOperationAction(ISD::FMUL, MVT::v2f16, Legal); |
| 677 | setOperationAction(ISD::FMA, MVT::v2f16, Legal); |
| 678 | |
| 679 | setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal); |
| 680 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal); |
| 681 | |
| 682 | setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal); |
| 683 | |
| 684 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); |
| 685 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); |
| 686 | |
| 687 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom); |
| 688 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); |
| 689 | |
| 690 | setOperationAction(ISD::SHL, MVT::v4i16, Custom); |
| 691 | setOperationAction(ISD::SRA, MVT::v4i16, Custom); |
| 692 | setOperationAction(ISD::SRL, MVT::v4i16, Custom); |
| 693 | setOperationAction(ISD::ADD, MVT::v4i16, Custom); |
| 694 | setOperationAction(ISD::SUB, MVT::v4i16, Custom); |
| 695 | setOperationAction(ISD::MUL, MVT::v4i16, Custom); |
| 696 | |
| 697 | setOperationAction(ISD::SMIN, MVT::v4i16, Custom); |
| 698 | setOperationAction(ISD::SMAX, MVT::v4i16, Custom); |
| 699 | setOperationAction(ISD::UMIN, MVT::v4i16, Custom); |
| 700 | setOperationAction(ISD::UMAX, MVT::v4i16, Custom); |
| 701 | |
| 702 | setOperationAction(ISD::UADDSAT, MVT::v4i16, Custom); |
| 703 | setOperationAction(ISD::SADDSAT, MVT::v4i16, Custom); |
| 704 | setOperationAction(ISD::USUBSAT, MVT::v4i16, Custom); |
| 705 | setOperationAction(ISD::SSUBSAT, MVT::v4i16, Custom); |
| 706 | |
| 707 | setOperationAction(ISD::FADD, MVT::v4f16, Custom); |
| 708 | setOperationAction(ISD::FMUL, MVT::v4f16, Custom); |
| 709 | setOperationAction(ISD::FMA, MVT::v4f16, Custom); |
| 710 | |
| 711 | setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom); |
| 712 | setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom); |
| 713 | |
| 714 | setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom); |
| 715 | setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom); |
| 716 | setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom); |
| 717 | |
| 718 | setOperationAction(ISD::FEXP, MVT::v2f16, Custom); |
| 719 | setOperationAction(ISD::SELECT, MVT::v4i16, Custom); |
| 720 | setOperationAction(ISD::SELECT, MVT::v4f16, Custom); |
| 721 | } |
| 722 | |
| 723 | setOperationAction(ISD::FNEG, MVT::v4f16, Custom); |
| 724 | setOperationAction(ISD::FABS, MVT::v4f16, Custom); |
| 725 | |
| 726 | if (Subtarget->has16BitInsts()) { |
| 727 | setOperationAction(ISD::SELECT, MVT::v2i16, Promote); |
| 728 | AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32); |
| 729 | setOperationAction(ISD::SELECT, MVT::v2f16, Promote); |
| 730 | AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32); |
| 731 | } else { |
| 732 | // Legalization hack. |
| 733 | setOperationAction(ISD::SELECT, MVT::v2i16, Custom); |
| 734 | setOperationAction(ISD::SELECT, MVT::v2f16, Custom); |
| 735 | |
| 736 | setOperationAction(ISD::FNEG, MVT::v2f16, Custom); |
| 737 | setOperationAction(ISD::FABS, MVT::v2f16, Custom); |
| 738 | } |
| 739 | |
| 740 | for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) { |
| 741 | setOperationAction(ISD::SELECT, VT, Custom); |
| 742 | } |
| 743 | |
| 744 | setOperationAction(ISD::SMULO, MVT::i64, Custom); |
| 745 | setOperationAction(ISD::UMULO, MVT::i64, Custom); |
| 746 | |
| 747 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); |
| 748 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); |
| 749 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); |
| 750 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); |
| 751 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom); |
| 752 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom); |
| 753 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom); |
| 754 | |
| 755 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom); |
| 756 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2i16, Custom); |
| 757 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3f16, Custom); |
| 758 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3i16, Custom); |
| 759 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom); |
| 760 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4i16, Custom); |
| 761 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom); |
| 762 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); |
| 763 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::f16, Custom); |
| 764 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); |
| 765 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); |
| 766 | |
| 767 | setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); |
| 768 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom); |
| 769 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom); |
| 770 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v3i16, Custom); |
| 771 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v3f16, Custom); |
| 772 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom); |
| 773 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom); |
| 774 | setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom); |
| 775 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); |
| 776 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); |
| 777 | |
| 778 | setTargetDAGCombine(ISD::ADD); |
| 779 | setTargetDAGCombine(ISD::ADDCARRY); |
| 780 | setTargetDAGCombine(ISD::SUB); |
| 781 | setTargetDAGCombine(ISD::SUBCARRY); |
| 782 | setTargetDAGCombine(ISD::FADD); |
| 783 | setTargetDAGCombine(ISD::FSUB); |
| 784 | setTargetDAGCombine(ISD::FMINNUM); |
| 785 | setTargetDAGCombine(ISD::FMAXNUM); |
| 786 | setTargetDAGCombine(ISD::FMINNUM_IEEE); |
| 787 | setTargetDAGCombine(ISD::FMAXNUM_IEEE); |
| 788 | setTargetDAGCombine(ISD::FMA); |
| 789 | setTargetDAGCombine(ISD::SMIN); |
| 790 | setTargetDAGCombine(ISD::SMAX); |
| 791 | setTargetDAGCombine(ISD::UMIN); |
| 792 | setTargetDAGCombine(ISD::UMAX); |
| 793 | setTargetDAGCombine(ISD::SETCC); |
| 794 | setTargetDAGCombine(ISD::AND); |
| 795 | setTargetDAGCombine(ISD::OR); |
| 796 | setTargetDAGCombine(ISD::XOR); |
| 797 | setTargetDAGCombine(ISD::SINT_TO_FP); |
| 798 | setTargetDAGCombine(ISD::UINT_TO_FP); |
| 799 | setTargetDAGCombine(ISD::FCANONICALIZE); |
| 800 | setTargetDAGCombine(ISD::SCALAR_TO_VECTOR); |
| 801 | setTargetDAGCombine(ISD::ZERO_EXTEND); |
| 802 | setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); |
| 803 | setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); |
| 804 | setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); |
| 805 | |
  // All memory operations. Some folding on the pointer operand is done to help
  // match the constant offsets in the addressing modes.
| 808 | setTargetDAGCombine(ISD::LOAD); |
| 809 | setTargetDAGCombine(ISD::STORE); |
| 810 | setTargetDAGCombine(ISD::ATOMIC_LOAD); |
| 811 | setTargetDAGCombine(ISD::ATOMIC_STORE); |
| 812 | setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP); |
| 813 | setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); |
| 814 | setTargetDAGCombine(ISD::ATOMIC_SWAP); |
| 815 | setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD); |
| 816 | setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB); |
| 817 | setTargetDAGCombine(ISD::ATOMIC_LOAD_AND); |
| 818 | setTargetDAGCombine(ISD::ATOMIC_LOAD_OR); |
| 819 | setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR); |
| 820 | setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND); |
| 821 | setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN); |
| 822 | setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX); |
| 823 | setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN); |
| 824 | setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX); |
| 825 | setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD); |
| 826 | setTargetDAGCombine(ISD::INTRINSIC_VOID); |
| 827 | setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); |
| 828 | |
| 829 | // FIXME: In other contexts we pretend this is a per-function property. |
| 830 | setStackPointerRegisterToSaveRestore(AMDGPU::SGPR32); |
| 831 | |
| 832 | setSchedulingPreference(Sched::RegPressure); |
| 833 | } |
| 834 | |
| 835 | const GCNSubtarget *SITargetLowering::getSubtarget() const { |
| 836 | return Subtarget; |
| 837 | } |
| 838 | |
| 839 | //===----------------------------------------------------------------------===// |
| 840 | // TargetLowering queries |
| 841 | //===----------------------------------------------------------------------===// |
| 842 | |
| 843 | // v_mad_mix* support a conversion from f16 to f32. |
| 844 | // |
// There is only one special case where this is still OK to use when denormals
// are enabled, but we don't currently handle it.
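//
// For example, (f32 (fma (fpext f16:$a), (fpext f16:$b), f32:$c)) can fold the
// extensions into v_fma_mix_f32 when FP32 denormals are flushed.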
| 847 | bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, |
| 848 | EVT DestVT, EVT SrcVT) const { |
| 849 | return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) || |
| 850 | (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) && |
| 851 | DestVT.getScalarType() == MVT::f32 && |
| 852 | SrcVT.getScalarType() == MVT::f16 && |
| 853 | // TODO: This probably only requires no input flushing? |
| 854 | !hasFP32Denormals(DAG.getMachineFunction()); |
| 855 | } |
| 856 | |
| 857 | bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const { |
| 858 | // SI has some legal vector types, but no legal vector operations. Say no |
| 859 | // shuffles are legal in order to prefer scalarizing some vector operations. |
| 860 | return false; |
| 861 | } |
| 862 | |
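// For non-kernel calling conventions, 16-bit vector elements are passed packed
// two to a 32-bit register (v2i16/v2f16) when 16-bit instructions exist;
// everything else is broken down into 32-bit registers.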
| 863 | MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, |
| 864 | CallingConv::ID CC, |
| 865 | EVT VT) const { |
| 866 | if (CC == CallingConv::AMDGPU_KERNEL) |
| 867 | return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); |
| 868 | |
| 869 | if (VT.isVector()) { |
| 870 | EVT ScalarVT = VT.getScalarType(); |
| 871 | unsigned Size = ScalarVT.getSizeInBits(); |
| 872 | if (Size == 16) { |
| 873 | if (Subtarget->has16BitInsts()) |
| 874 | return VT.isInteger() ? MVT::v2i16 : MVT::v2f16; |
| 875 | return VT.isInteger() ? MVT::i32 : MVT::f32; |
| 876 | } |
| 877 | |
| 878 | if (Size < 16) |
| 879 | return Subtarget->has16BitInsts() ? MVT::i16 : MVT::i32; |
| 880 | return Size == 32 ? ScalarVT.getSimpleVT() : MVT::i32; |
| 881 | } |
| 882 | |
| 883 | if (VT.getSizeInBits() > 32) |
| 884 | return MVT::i32; |
| 885 | |
| 886 | return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); |
| 887 | } |
| 888 | |
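// Count registers under the scheme above: e.g. v5f16 with 16-bit instructions
// needs (5 + 1) / 2 = 3 packed registers, and v2i64 needs 2 * (64 / 32) = 4
// 32-bit registers.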
| 889 | unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, |
| 890 | CallingConv::ID CC, |
| 891 | EVT VT) const { |
| 892 | if (CC == CallingConv::AMDGPU_KERNEL) |
| 893 | return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); |
| 894 | |
| 895 | if (VT.isVector()) { |
| 896 | unsigned NumElts = VT.getVectorNumElements(); |
| 897 | EVT ScalarVT = VT.getScalarType(); |
| 898 | unsigned Size = ScalarVT.getSizeInBits(); |
| 899 | |
| 900 | // FIXME: Should probably promote 8-bit vectors to i16. |
| 901 | if (Size == 16 && Subtarget->has16BitInsts()) |
| 902 | return (NumElts + 1) / 2; |
| 903 | |
| 904 | if (Size <= 32) |
| 905 | return NumElts; |
| 906 | |
| 907 | if (Size > 32) |
| 908 | return NumElts * ((Size + 31) / 32); |
| 909 | } else if (VT.getSizeInBits() > 32) |
| 910 | return (VT.getSizeInBits() + 31) / 32; |
| 911 | |
| 912 | return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); |
| 913 | } |
| 914 | |
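// Break a vector down into the intermediate pieces implied by the scheme
// above: e.g. v5f16 with 16-bit instructions gives IntermediateVT = v2f16 and
// NumIntermediates = 3, with the final piece only half used.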
| 915 | unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv( |
| 916 | LLVMContext &Context, CallingConv::ID CC, |
| 917 | EVT VT, EVT &IntermediateVT, |
| 918 | unsigned &NumIntermediates, MVT &RegisterVT) const { |
| 919 | if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) { |
| 920 | unsigned NumElts = VT.getVectorNumElements(); |
| 921 | EVT ScalarVT = VT.getScalarType(); |
| 922 | unsigned Size = ScalarVT.getSizeInBits(); |
| 923 | // FIXME: We should fix the ABI to be the same on targets without 16-bit |
    // support, but unless we can properly handle 3-vectors, it will still be
| 925 | // inconsistent. |
| 926 | if (Size == 16 && Subtarget->has16BitInsts()) { |
| 927 | RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16; |
| 928 | IntermediateVT = RegisterVT; |
| 929 | NumIntermediates = (NumElts + 1) / 2; |
| 930 | return NumIntermediates; |
| 931 | } |
| 932 | |
| 933 | if (Size == 32) { |
| 934 | RegisterVT = ScalarVT.getSimpleVT(); |
| 935 | IntermediateVT = RegisterVT; |
| 936 | NumIntermediates = NumElts; |
| 937 | return NumIntermediates; |
| 938 | } |
| 939 | |
| 940 | if (Size < 16 && Subtarget->has16BitInsts()) { |
| 941 | // FIXME: Should probably form v2i16 pieces |
| 942 | RegisterVT = MVT::i16; |
| 943 | IntermediateVT = ScalarVT; |
| 944 | NumIntermediates = NumElts; |
| 945 | return NumIntermediates; |
| 946 | } |
| 947 | |
| 949 | if (Size != 16 && Size <= 32) { |
| 950 | RegisterVT = MVT::i32; |
| 951 | IntermediateVT = ScalarVT; |
| 952 | NumIntermediates = NumElts; |
| 953 | return NumIntermediates; |
| 954 | } |
| 955 | |
| 956 | if (Size > 32) { |
| 957 | RegisterVT = MVT::i32; |
| 958 | IntermediateVT = RegisterVT; |
| 959 | NumIntermediates = NumElts * ((Size + 31) / 32); |
| 960 | return NumIntermediates; |
| 961 | } |
| 962 | } |
| 963 | |
| 964 | return TargetLowering::getVectorTypeBreakdownForCallingConv( |
| 965 | Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT); |
| 966 | } |
| 967 | |
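// Compute the memory VT really accessed by an image intrinsic from its IR type
// and dmask, e.g. a <4 x float> result with dmask = 0b0111 only loads v3f32.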
| 968 | static EVT memVTFromImageData(Type *Ty, unsigned DMaskLanes) { |
| 969 | assert(DMaskLanes != 0); |
| 970 | |
| 971 | if (auto *VT = dyn_cast<FixedVectorType>(Ty)) { |
| 972 | unsigned NumElts = std::min(DMaskLanes, VT->getNumElements()); |
| 973 | return EVT::getVectorVT(Ty->getContext(), |
| 974 | EVT::getEVT(VT->getElementType()), |
| 975 | NumElts); |
| 976 | } |
| 977 | |
| 978 | return EVT::getEVT(Ty); |
| 979 | } |
| 980 | |
| 981 | // Peek through TFE struct returns to only use the data size. |
| 982 | static EVT memVTFromImageReturn(Type *Ty, unsigned DMaskLanes) { |
| 983 | auto *ST = dyn_cast<StructType>(Ty); |
| 984 | if (!ST) |
| 985 | return memVTFromImageData(Ty, DMaskLanes); |
| 986 | |
| 987 | // Some intrinsics return an aggregate type - special case to work out the |
| 988 | // correct memVT. |
| 989 | // |
  // Only limited forms of aggregate type are currently expected.
| 991 | if (ST->getNumContainedTypes() != 2 || |
| 992 | !ST->getContainedType(1)->isIntegerTy(32)) |
| 993 | return EVT(); |
| 994 | return memVTFromImageData(ST->getContainedType(0), DMaskLanes); |
| 995 | } |
| 996 | |
| 997 | bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, |
| 998 | const CallInst &CI, |
| 999 | MachineFunction &MF, |
| 1000 | unsigned IntrID) const { |
| 1001 | if (const AMDGPU::RsrcIntrinsic *RsrcIntr = |
| 1002 | AMDGPU::lookupRsrcIntrinsic(IntrID)) { |
| 1003 | AttributeList Attr = Intrinsic::getAttributes(CI.getContext(), |
| 1004 | (Intrinsic::ID)IntrID); |
| 1005 | if (Attr.hasFnAttribute(Attribute::ReadNone)) |
| 1006 | return false; |
| 1007 | |
| 1008 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 1009 | |
| 1010 | if (RsrcIntr->IsImage) { |
| 1011 | Info.ptrVal = |
| 1012 | MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); |
| 1013 | Info.align.reset(); |
| 1014 | } else { |
| 1015 | Info.ptrVal = |
| 1016 | MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); |
| 1017 | } |
| 1018 | |
| 1019 | Info.flags = MachineMemOperand::MODereferenceable; |
| 1020 | if (Attr.hasFnAttribute(Attribute::ReadOnly)) { |
| 1021 | unsigned DMaskLanes = 4; |
| 1022 | |
| 1023 | if (RsrcIntr->IsImage) { |
| 1024 | const AMDGPU::ImageDimIntrinsicInfo *Intr |
| 1025 | = AMDGPU::getImageDimIntrinsicInfo(IntrID); |
| 1026 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = |
| 1027 | AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); |
| 1028 | |
| 1029 | if (!BaseOpcode->Gather4) { |
| 1030 | // If this isn't a gather, we may have excess loaded elements in the |
| 1031 | // IR type. Check the dmask for the real number of elements loaded. |
| 1032 | unsigned DMask |
| 1033 | = cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue(); |
| 1034 | DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask); |
| 1035 | } |
| 1036 | |
| 1037 | Info.memVT = memVTFromImageReturn(CI.getType(), DMaskLanes); |
| 1038 | } else |
| 1039 | Info.memVT = EVT::getEVT(CI.getType()); |
| 1040 | |
| 1041 | // FIXME: What does alignment mean for an image? |
| 1042 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 1043 | Info.flags |= MachineMemOperand::MOLoad; |
| 1044 | } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) { |
| 1045 | Info.opc = ISD::INTRINSIC_VOID; |
| 1046 | |
| 1047 | Type *DataTy = CI.getArgOperand(0)->getType(); |
| 1048 | if (RsrcIntr->IsImage) { |
| 1049 | unsigned DMask = cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue(); |
| 1050 | unsigned DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask); |
| 1051 | Info.memVT = memVTFromImageData(DataTy, DMaskLanes); |
| 1052 | } else |
| 1053 | Info.memVT = EVT::getEVT(DataTy); |
| 1054 | |
| 1055 | Info.flags |= MachineMemOperand::MOStore; |
| 1056 | } else { |
| 1057 | // Atomic |
| 1058 | Info.opc = CI.getType()->isVoidTy() ? ISD::INTRINSIC_VOID : |
| 1059 | ISD::INTRINSIC_W_CHAIN; |
| 1060 | Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType()); |
| 1061 | Info.flags = MachineMemOperand::MOLoad | |
| 1062 | MachineMemOperand::MOStore | |
| 1063 | MachineMemOperand::MODereferenceable; |
| 1064 | |
| 1065 | // XXX - Should this be volatile without known ordering? |
| 1066 | Info.flags |= MachineMemOperand::MOVolatile; |
| 1067 | } |
| 1068 | return true; |
| 1069 | } |
| 1070 | |
| 1071 | switch (IntrID) { |
| 1072 | case Intrinsic::amdgcn_atomic_inc: |
| 1073 | case Intrinsic::amdgcn_atomic_dec: |
| 1074 | case Intrinsic::amdgcn_ds_ordered_add: |
| 1075 | case Intrinsic::amdgcn_ds_ordered_swap: |
| 1076 | case Intrinsic::amdgcn_ds_fadd: |
| 1077 | case Intrinsic::amdgcn_ds_fmin: |
| 1078 | case Intrinsic::amdgcn_ds_fmax: { |
| 1079 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 1080 | Info.memVT = MVT::getVT(CI.getType()); |
| 1081 | Info.ptrVal = CI.getOperand(0); |
| 1082 | Info.align.reset(); |
| 1083 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; |
| 1084 | |
| 1085 | const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4)); |
| 1086 | if (!Vol->isZero()) |
| 1087 | Info.flags |= MachineMemOperand::MOVolatile; |
| 1088 | |
| 1089 | return true; |
| 1090 | } |
| 1091 | case Intrinsic::amdgcn_buffer_atomic_fadd: { |
| 1092 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 1093 | |
| 1094 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 1095 | Info.memVT = MVT::getVT(CI.getOperand(0)->getType()); |
| 1096 | Info.ptrVal = |
| 1097 | MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); |
| 1098 | Info.align.reset(); |
| 1099 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; |
| 1100 | |
| 1101 | const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4)); |
| 1102 | if (!Vol || !Vol->isZero()) |
| 1103 | Info.flags |= MachineMemOperand::MOVolatile; |
| 1104 | |
| 1105 | return true; |
| 1106 | } |
| 1107 | case Intrinsic::amdgcn_ds_append: |
| 1108 | case Intrinsic::amdgcn_ds_consume: { |
| 1109 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 1110 | Info.memVT = MVT::getVT(CI.getType()); |
| 1111 | Info.ptrVal = CI.getOperand(0); |
| 1112 | Info.align.reset(); |
| 1113 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; |
| 1114 | |
| 1115 | const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1)); |
| 1116 | if (!Vol->isZero()) |
| 1117 | Info.flags |= MachineMemOperand::MOVolatile; |
| 1118 | |
| 1119 | return true; |
| 1120 | } |
| 1121 | case Intrinsic::amdgcn_global_atomic_csub: { |
| 1122 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 1123 | Info.memVT = MVT::getVT(CI.getType()); |
| 1124 | Info.ptrVal = CI.getOperand(0); |
| 1125 | Info.align.reset(); |
| 1126 | Info.flags = MachineMemOperand::MOLoad | |
| 1127 | MachineMemOperand::MOStore | |
| 1128 | MachineMemOperand::MOVolatile; |
| 1129 | return true; |
| 1130 | } |
| 1131 | case Intrinsic::amdgcn_global_atomic_fadd: { |
| 1132 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 1133 | Info.memVT = MVT::getVT(CI.getType()); |
| 1134 | Info.ptrVal = CI.getOperand(0); |
| 1135 | Info.align.reset(); |
| 1136 | Info.flags = MachineMemOperand::MOLoad | |
| 1137 | MachineMemOperand::MOStore | |
| 1138 | MachineMemOperand::MODereferenceable | |
| 1139 | MachineMemOperand::MOVolatile; |
| 1140 | return true; |
| 1141 | } |
| 1142 | case Intrinsic::amdgcn_image_bvh_intersect_ray: { |
| 1143 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 1144 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 1145 | Info.memVT = MVT::getVT(CI.getType()); // XXX: what is correct VT? |
| 1146 | Info.ptrVal = |
| 1147 | MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); |
| 1148 | Info.align.reset(); |
| 1149 | Info.flags = MachineMemOperand::MOLoad | |
| 1150 | MachineMemOperand::MODereferenceable; |
| 1151 | return true; |
| 1152 | } |
| 1153 | case Intrinsic::amdgcn_ds_gws_init: |
| 1154 | case Intrinsic::amdgcn_ds_gws_barrier: |
| 1155 | case Intrinsic::amdgcn_ds_gws_sema_v: |
| 1156 | case Intrinsic::amdgcn_ds_gws_sema_br: |
| 1157 | case Intrinsic::amdgcn_ds_gws_sema_p: |
| 1158 | case Intrinsic::amdgcn_ds_gws_sema_release_all: { |
| 1159 | Info.opc = ISD::INTRINSIC_VOID; |
| 1160 | |
| 1161 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 1162 | Info.ptrVal = |
| 1163 | MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); |
| 1164 | |
| 1165 | // This is an abstract access, but we need to specify a type and size. |
| 1166 | Info.memVT = MVT::i32; |
| 1167 | Info.size = 4; |
| 1168 | Info.align = Align(4); |
| 1169 | |
| 1170 | Info.flags = MachineMemOperand::MOStore; |
| 1171 | if (IntrID == Intrinsic::amdgcn_ds_gws_barrier) |
| 1172 | Info.flags = MachineMemOperand::MOLoad; |
| 1173 | return true; |
| 1174 | } |
| 1175 | default: |
| 1176 | return false; |
| 1177 | } |
| 1178 | } |
| 1179 | |
| 1180 | bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II, |
| 1181 | SmallVectorImpl<Value*> &Ops, |
| 1182 | Type *&AccessTy) const { |
| 1183 | switch (II->getIntrinsicID()) { |
| 1184 | case Intrinsic::amdgcn_atomic_inc: |
| 1185 | case Intrinsic::amdgcn_atomic_dec: |
| 1186 | case Intrinsic::amdgcn_ds_ordered_add: |
| 1187 | case Intrinsic::amdgcn_ds_ordered_swap: |
| 1188 | case Intrinsic::amdgcn_ds_append: |
| 1189 | case Intrinsic::amdgcn_ds_consume: |
| 1190 | case Intrinsic::amdgcn_ds_fadd: |
| 1191 | case Intrinsic::amdgcn_ds_fmin: |
| 1192 | case Intrinsic::amdgcn_ds_fmax: |
| 1193 | case Intrinsic::amdgcn_global_atomic_fadd: |
| 1194 | case Intrinsic::amdgcn_global_atomic_csub: { |
| 1195 | Value *Ptr = II->getArgOperand(0); |
| 1196 | AccessTy = II->getType(); |
| 1197 | Ops.push_back(Ptr); |
| 1198 | return true; |
| 1199 | } |
| 1200 | default: |
| 1201 | return false; |
| 1202 | } |
| 1203 | } |
| 1204 | |
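// For the addressing-mode queries below, AM.BaseOffs is the constant byte
// offset and AM.Scale the multiplier on an index register. Plain flat
// instructions only take a register address, plus a small immediate offset on
// subtargets with hasFlatInstOffsets().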
| 1205 | bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const { |
| 1206 | if (!Subtarget->hasFlatInstOffsets()) { |
| 1207 | // Flat instructions do not have offsets, and only have the register |
| 1208 | // address. |
| 1209 | return AM.BaseOffs == 0 && AM.Scale == 0; |
| 1210 | } |
| 1211 | |
| 1212 | return AM.Scale == 0 && |
| 1213 | (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset( |
| 1214 | AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS, |
| 1215 | /*Signed=*/false)); |
| 1216 | } |
| 1217 | |
| 1218 | bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const { |
| 1219 | if (Subtarget->hasFlatGlobalInsts()) |
| 1220 | return AM.Scale == 0 && |
| 1221 | (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset( |
| 1222 | AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS, |
| 1223 | /*Signed=*/true)); |
| 1224 | |
| 1225 | if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) { |
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
| 1228 | // FIXME: This assumption is currently wrong. On VI we still use |
| 1229 | // MUBUF instructions for the r + i addressing mode. As currently |
    // implemented, the MUBUF instructions only work on buffers < 4GB.
| 1231 | // It may be possible to support > 4GB buffers with MUBUF instructions, |
| 1232 | // by setting the stride value in the resource descriptor which would |
| 1233 | // increase the size limit to (stride * 4GB). However, this is risky, |
| 1234 | // because it has never been validated. |
| 1235 | return isLegalFlatAddressingMode(AM); |
| 1236 | } |
| 1237 | |
| 1238 | return isLegalMUBUFAddressingMode(AM); |
| 1239 | } |
| 1240 | |
| 1241 | bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { |
| 1242 | // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and |
| 1243 | // additionally can do r + r + i with addr64. 32-bit has more addressing |
| 1244 | // mode options. Depending on the resource constant, it can also do |
| 1245 | // (i64 r0) + (i32 r1) * (i14 i). |
| 1246 | // |
| 1247 | // Private arrays end up using a scratch buffer most of the time, so also |
| 1248 | // assume those use MUBUF instructions. Scratch loads / stores are currently |
| 1249 | // implemented as mubuf instructions with offen bit set, so slightly |
| 1250 | // different than the normal addr64. |
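  //
  // For example (illustrative): AM = {BaseOffs = 4092, Scale = 0} fits the
  // 12-bit unsigned immediate field (maximum 4095) and is accepted, while
  // BaseOffs = 4096 is rejected by the check below.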
| 1251 | if (!SIInstrInfo::isLegalMUBUFImmOffset(AM.BaseOffs)) |
| 1252 | return false; |
| 1253 | |
| 1254 | // FIXME: Since we can split immediate into soffset and immediate offset, |
| 1255 | // would it make sense to allow any immediate? |
| 1256 | |
| 1257 | switch (AM.Scale) { |
| 1258 | case 0: // r + i or just i, depending on HasBaseReg. |
| 1259 | return true; |
| 1260 | case 1: |
| 1261 | return true; // We have r + r or r + i. |
| 1262 | case 2: |
| 1263 | if (AM.HasBaseReg) { |
| 1264 | // Reject 2 * r + r. |
| 1265 | return false; |
| 1266 | } |
| 1267 | |
    // Allow 2 * r as r + r,
    // and 2 * r + i as r + r + i.
| 1270 | return true; |
| 1271 | default: // Don't allow n * r |
| 1272 | return false; |
| 1273 | } |
| 1274 | } |
| 1275 | |
| 1276 | bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL, |
| 1277 | const AddrMode &AM, Type *Ty, |
| 1278 | unsigned AS, Instruction *I) const { |
| 1279 | // No global is ever allowed as a base. |
| 1280 | if (AM.BaseGV) |
| 1281 | return false; |
| 1282 | |
| 1283 | if (AS == AMDGPUAS::GLOBAL_ADDRESS) |
| 1284 | return isLegalGlobalAddressingMode(AM); |
| 1285 | |
| 1286 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || |
| 1287 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || |
| 1288 | AS == AMDGPUAS::BUFFER_FAT_POINTER) { |
| 1289 | // If the offset isn't a multiple of 4, it probably isn't going to be |
| 1290 | // correctly aligned. |
| 1291 | // FIXME: Can we get the real alignment here? |
| 1292 | if (AM.BaseOffs % 4 != 0) |
| 1293 | return isLegalMUBUFAddressingMode(AM); |
| 1294 | |
| 1295 | // There are no SMRD extloads, so if we have to do a small type access we |
| 1296 | // will use a MUBUF load. |
| 1297 | // FIXME?: We also need to do this if unaligned, but we don't know the |
| 1298 | // alignment here. |
| 1299 | if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4) |
| 1300 | return isLegalGlobalAddressingMode(AM); |
| 1301 | |
| 1302 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { |
| 1303 | // SMRD instructions have an 8-bit, dword offset on SI. |
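      // For example (illustrative): a byte offset of 1020 encodes as dword
      // offset 255 and fits, while 1024 (dword offset 256) does not.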
| 1304 | if (!isUInt<8>(AM.BaseOffs / 4)) |
| 1305 | return false; |
| 1306 | } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) { |
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8 bits, it can use a smaller encoding.
| 1309 | if (!isUInt<32>(AM.BaseOffs / 4)) |
| 1310 | return false; |
| 1311 | } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { |
      // On VI, these use the SMEM format and the offset is a 20-bit byte
      // offset.
| 1313 | if (!isUInt<20>(AM.BaseOffs)) |
| 1314 | return false; |
| 1315 | } else |
| 1316 | llvm_unreachable("unhandled generation" ); |
| 1317 | |
| 1318 | if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. |
| 1319 | return true; |
| 1320 | |
| 1321 | if (AM.Scale == 1 && AM.HasBaseReg) |
| 1322 | return true; |
| 1323 | |
| 1324 | return false; |
| 1325 | |
| 1326 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { |
| 1327 | return isLegalMUBUFAddressingMode(AM); |
| 1328 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || |
| 1329 | AS == AMDGPUAS::REGION_ADDRESS) { |
| 1330 | // Basic, single offset DS instructions allow a 16-bit unsigned immediate |
| 1331 | // field. |
| 1332 | // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have |
| 1333 | // an 8-bit dword offset but we don't know the alignment here. |
| 1334 | if (!isUInt<16>(AM.BaseOffs)) |
| 1335 | return false; |
| 1336 | |
| 1337 | if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. |
| 1338 | return true; |
| 1339 | |
| 1340 | if (AM.Scale == 1 && AM.HasBaseReg) |
| 1341 | return true; |
| 1342 | |
| 1343 | return false; |
| 1344 | } else if (AS == AMDGPUAS::FLAT_ADDRESS || |
| 1345 | AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) { |
| 1346 | // For an unknown address space, this usually means that this is for some |
| 1347 | // reason being used for pure arithmetic, and not based on some addressing |
| 1348 | // computation. We don't have instructions that compute pointers with any |
| 1349 | // addressing modes, so treat them as having no offset like flat |
| 1350 | // instructions. |
| 1351 | return isLegalFlatAddressingMode(AM); |
| 1352 | } |
| 1353 | |
| 1354 | // Assume a user alias of global for unknown address spaces. |
| 1355 | return isLegalGlobalAddressingMode(AM); |
| 1356 | } |
| 1357 | |
| 1358 | bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT, |
| 1359 | const SelectionDAG &DAG) const { |
| 1360 | if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) { |
| 1361 | return (MemVT.getSizeInBits() <= 4 * 32); |
| 1362 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { |
| 1363 | unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize(); |
| 1364 | return (MemVT.getSizeInBits() <= MaxPrivateBits); |
| 1365 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { |
| 1366 | return (MemVT.getSizeInBits() <= 2 * 32); |
| 1367 | } |
| 1368 | return true; |
| 1369 | } |
| 1370 | |
| 1371 | bool SITargetLowering::allowsMisalignedMemoryAccessesImpl( |
| 1372 | unsigned Size, unsigned AddrSpace, Align Alignment, |
| 1373 | MachineMemOperand::Flags Flags, bool *IsFast) const { |
| 1374 | if (IsFast) |
| 1375 | *IsFast = false; |
| 1376 | |
| 1377 | if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS || |
| 1378 | AddrSpace == AMDGPUAS::REGION_ADDRESS) { |
| 1379 | // Check if alignment requirements for ds_read/write instructions are |
| 1380 | // disabled. |
| 1381 | if (Subtarget->hasUnalignedDSAccessEnabled() && |
| 1382 | !Subtarget->hasLDSMisalignedBug()) { |
| 1383 | if (IsFast) |
| 1384 | *IsFast = Alignment != Align(2); |
| 1385 | return true; |
| 1386 | } |
| 1387 | |
| 1388 | if (Size == 64) { |
      // ds_read/write_b64 require 8-byte alignment, but we can do a 4-byte
      // aligned, 8-byte access in a single operation using
      // ds_read2/write2_b32 with adjacent offsets.
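      //
      // For example (illustrative), such an access may be selected as:
      //   ds_read2_b32 v[0:1], v2 offset0:0 offset1:1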
| 1392 | bool AlignedBy4 = Alignment >= Align(4); |
| 1393 | if (IsFast) |
| 1394 | *IsFast = AlignedBy4; |
| 1395 | |
| 1396 | return AlignedBy4; |
| 1397 | } |
| 1398 | if (Size == 96) { |
| 1399 | // ds_read/write_b96 require 16-byte alignment on gfx8 and older. |
| 1400 | bool Aligned = Alignment >= Align(16); |
| 1401 | if (IsFast) |
| 1402 | *IsFast = Aligned; |
| 1403 | |
| 1404 | return Aligned; |
| 1405 | } |
| 1406 | if (Size == 128) { |
      // ds_read/write_b128 require 16-byte alignment on gfx8 and older, but
      // we can do an 8-byte aligned, 16-byte access in a single operation
      // using ds_read2/write2_b64.
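      //
      // For example (illustrative), that pairing may be selected as:
      //   ds_read2_b64 v[0:3], v4 offset0:0 offset1:1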
| 1410 | bool Aligned = Alignment >= Align(8); |
| 1411 | if (IsFast) |
| 1412 | *IsFast = Aligned; |
| 1413 | |
| 1414 | return Aligned; |
| 1415 | } |
| 1416 | } |
| 1417 | |
| 1418 | if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) { |
| 1419 | bool AlignedBy4 = Alignment >= Align(4); |
| 1420 | if (IsFast) |
| 1421 | *IsFast = AlignedBy4; |
| 1422 | |
| 1423 | return AlignedBy4 || |
| 1424 | Subtarget->enableFlatScratch() || |
| 1425 | Subtarget->hasUnalignedScratchAccess(); |
| 1426 | } |
| 1427 | |
| 1428 | // FIXME: We have to be conservative here and assume that flat operations |
| 1429 | // will access scratch. If we had access to the IR function, then we |
| 1430 | // could determine if any private memory was used in the function. |
| 1431 | if (AddrSpace == AMDGPUAS::FLAT_ADDRESS && |
| 1432 | !Subtarget->hasUnalignedScratchAccess()) { |
| 1433 | bool AlignedBy4 = Alignment >= Align(4); |
| 1434 | if (IsFast) |
| 1435 | *IsFast = AlignedBy4; |
| 1436 | |
| 1437 | return AlignedBy4; |
| 1438 | } |
| 1439 | |
| 1440 | if (Subtarget->hasUnalignedBufferAccessEnabled() && |
| 1441 | !(AddrSpace == AMDGPUAS::LOCAL_ADDRESS || |
| 1442 | AddrSpace == AMDGPUAS::REGION_ADDRESS)) { |
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
| 1445 | if (IsFast) { |
      // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
      // 2-byte alignment is worse than 1 unless doing a 2-byte access.
| 1448 | *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS || |
| 1449 | AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ? |
| 1450 | Alignment >= Align(4) : Alignment != Align(2); |
| 1451 | } |
| 1452 | |
| 1453 | return true; |
| 1454 | } |
| 1455 | |
  // Values smaller than a dword must be aligned.
| 1457 | if (Size < 32) |
| 1458 | return false; |
| 1459 | |
| 1460 | // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the |
| 1461 | // byte-address are ignored, thus forcing Dword alignment. |
| 1462 | // This applies to private, global, and constant memory. |
| 1463 | if (IsFast) |
| 1464 | *IsFast = true; |
| 1465 | |
| 1466 | return Size >= 32 && Alignment >= Align(4); |
| 1467 | } |
| 1468 | |
| 1469 | bool SITargetLowering::allowsMisalignedMemoryAccesses( |
| 1470 | EVT VT, unsigned AddrSpace, unsigned Alignment, |
| 1471 | MachineMemOperand::Flags Flags, bool *IsFast) const { |
| 1472 | if (IsFast) |
| 1473 | *IsFast = false; |
| 1474 | |
| 1475 | // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96, |
| 1476 | // which isn't a simple VT. |
| 1477 | // Until MVT is extended to handle this, simply check for the size and |
| 1478 | // rely on the condition below: allow accesses if the size is a multiple of 4. |
  if (VT == MVT::Other ||
      (VT.getSizeInBits() > 1024 && VT.getStoreSize() > 16)) {
    return false;
  }
| 1483 | |
| 1484 | return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace, |
| 1485 | Align(Alignment), Flags, IsFast); |
| 1486 | } |
| 1487 | |
| 1488 | EVT SITargetLowering::getOptimalMemOpType( |
| 1489 | const MemOp &Op, const AttributeList &FuncAttributes) const { |
| 1490 | // FIXME: Should account for address space here. |
| 1491 | |
| 1492 | // The default fallback uses the private pointer size as a guess for a type to |
| 1493 | // use. Make sure we switch these to 64-bit accesses. |
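  //
  // For example (illustrative): a 16-byte copy with a 4-byte aligned
  // destination returns MVT::v4i32 here rather than the 32-bit private
  // pointer type the fallback would guess.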
| 1494 | |
| 1495 | if (Op.size() >= 16 && |
| 1496 | Op.isDstAligned(Align(4))) // XXX: Should only do for global |
| 1497 | return MVT::v4i32; |
| 1498 | |
| 1499 | if (Op.size() >= 8 && Op.isDstAligned(Align(4))) |
| 1500 | return MVT::v2i32; |
| 1501 | |
| 1502 | // Use the default. |
| 1503 | return MVT::Other; |
| 1504 | } |
| 1505 | |
| 1506 | bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { |
| 1507 | const MemSDNode *MemNode = cast<MemSDNode>(N); |
| 1508 | const Value *Ptr = MemNode->getMemOperand()->getValue(); |
| 1509 | const Instruction *I = dyn_cast_or_null<Instruction>(Ptr); |
| 1510 | return I && I->getMetadata("amdgpu.noclobber" ); |
| 1511 | } |
| 1512 | |
| 1513 | bool SITargetLowering::isNonGlobalAddrSpace(unsigned AS) { |
| 1514 | return AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS || |
| 1515 | AS == AMDGPUAS::PRIVATE_ADDRESS; |
| 1516 | } |
| 1517 | |
| 1518 | bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS, |
| 1519 | unsigned DestAS) const { |
| 1520 | // Flat -> private/local is a simple truncate. |
  // Flat -> global is a no-op.
| 1522 | if (SrcAS == AMDGPUAS::FLAT_ADDRESS) |
| 1523 | return true; |
| 1524 | |
| 1525 | const GCNTargetMachine &TM = |
| 1526 | static_cast<const GCNTargetMachine &>(getTargetMachine()); |
| 1527 | return TM.isNoopAddrSpaceCast(SrcAS, DestAS); |
| 1528 | } |
| 1529 | |
| 1530 | bool SITargetLowering::isMemOpUniform(const SDNode *N) const { |
| 1531 | const MemSDNode *MemNode = cast<MemSDNode>(N); |
| 1532 | |
| 1533 | return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand()); |
| 1534 | } |
| 1535 | |
| 1536 | TargetLoweringBase::LegalizeTypeAction |
| 1537 | SITargetLowering::getPreferredVectorAction(MVT VT) const { |
| 1538 | int NumElts = VT.getVectorNumElements(); |
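  // For example (illustrative): v3i16 is widened to v4i16 here, while the
  // power-of-2 type v4i16 is split.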
| 1539 | if (NumElts != 1 && VT.getScalarType().bitsLE(MVT::i16)) |
| 1540 | return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector; |
| 1541 | return TargetLoweringBase::getPreferredVectorAction(VT); |
| 1542 | } |
| 1543 | |
| 1544 | bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
| 1545 | Type *Ty) const { |
| 1546 | // FIXME: Could be smarter if called for vector constants. |
| 1547 | return true; |
| 1548 | } |
| 1549 | |
| 1550 | bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { |
| 1551 | if (Subtarget->has16BitInsts() && VT == MVT::i16) { |
| 1552 | switch (Op) { |
| 1553 | case ISD::LOAD: |
| 1554 | case ISD::STORE: |
| 1555 | |
| 1556 | // These operations are done with 32-bit instructions anyway. |
| 1557 | case ISD::AND: |
| 1558 | case ISD::OR: |
| 1559 | case ISD::XOR: |
| 1560 | case ISD::SELECT: |
| 1561 | // TODO: Extensions? |
| 1562 | return true; |
| 1563 | default: |
| 1564 | return false; |
| 1565 | } |
| 1566 | } |
| 1567 | |
| 1568 | // SimplifySetCC uses this function to determine whether or not it should |
| 1569 | // create setcc with i1 operands. We don't have instructions for i1 setcc. |
| 1570 | if (VT == MVT::i1 && Op == ISD::SETCC) |
| 1571 | return false; |
| 1572 | |
| 1573 | return TargetLowering::isTypeDesirableForOp(Op, VT); |
| 1574 | } |
| 1575 | |
| 1576 | SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG, |
| 1577 | const SDLoc &SL, |
| 1578 | SDValue Chain, |
| 1579 | uint64_t Offset) const { |
| 1580 | const DataLayout &DL = DAG.getDataLayout(); |
| 1581 | MachineFunction &MF = DAG.getMachineFunction(); |
| 1582 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 1583 | |
| 1584 | const ArgDescriptor *InputPtrReg; |
| 1585 | const TargetRegisterClass *RC; |
| 1586 | LLT ArgTy; |
| 1587 | |
| 1588 | std::tie(InputPtrReg, RC, ArgTy) = |
| 1589 | Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); |
| 1590 | |
| 1591 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
| 1592 | MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); |
| 1593 | SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, |
| 1594 | MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT); |
| 1595 | |
| 1596 | return DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Offset)); |
| 1597 | } |
| 1598 | |
| 1599 | SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG, |
| 1600 | const SDLoc &SL) const { |
| 1601 | uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(), |
| 1602 | FIRST_IMPLICIT); |
| 1603 | return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset); |
| 1604 | } |
| 1605 | |
| 1606 | SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT, |
| 1607 | const SDLoc &SL, SDValue Val, |
| 1608 | bool Signed, |
| 1609 | const ISD::InputArg *Arg) const { |
| 1610 | // First, if it is a widened vector, narrow it. |
| 1611 | if (VT.isVector() && |
| 1612 | VT.getVectorNumElements() != MemVT.getVectorNumElements()) { |
| 1613 | EVT NarrowedVT = |
| 1614 | EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), |
| 1615 | VT.getVectorNumElements()); |
| 1616 | Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val, |
| 1617 | DAG.getConstant(0, SL, MVT::i32)); |
| 1618 | } |
| 1619 | |
| 1620 | // Then convert the vector elements or scalar value. |
| 1621 | if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) && |
| 1622 | VT.bitsLT(MemVT)) { |
| 1623 | unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext; |
| 1624 | Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT)); |
| 1625 | } |
| 1626 | |
| 1627 | if (MemVT.isFloatingPoint()) |
| 1628 | Val = getFPExtOrFPRound(DAG, Val, SL, VT); |
| 1629 | else if (Signed) |
| 1630 | Val = DAG.getSExtOrTrunc(Val, SL, VT); |
| 1631 | else |
| 1632 | Val = DAG.getZExtOrTrunc(Val, SL, VT); |
| 1633 | |
| 1634 | return Val; |
| 1635 | } |
| 1636 | |
| 1637 | SDValue SITargetLowering::lowerKernargMemParameter( |
| 1638 | SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Chain, |
| 1639 | uint64_t Offset, Align Alignment, bool Signed, |
| 1640 | const ISD::InputArg *Arg) const { |
| 1641 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); |
| 1642 | |
| 1643 | // Try to avoid using an extload by loading earlier than the argument address, |
| 1644 | // and extracting the relevant bits. The load should hopefully be merged with |
| 1645 | // the previous argument. |
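  //
  // For example (illustrative): an i16 argument at byte offset 2 becomes an
  // aligned dword load at offset 0, a shift right by 16 and a truncate to
  // i16.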
| 1646 | if (MemVT.getStoreSize() < 4 && Alignment < 4) { |
| 1647 | // TODO: Handle align < 4 and size >= 4 (can happen with packed structs). |
| 1648 | int64_t AlignDownOffset = alignDown(Offset, 4); |
| 1649 | int64_t OffsetDiff = Offset - AlignDownOffset; |
| 1650 | |
| 1651 | EVT IntVT = MemVT.changeTypeToInteger(); |
| 1652 | |
| 1653 | // TODO: If we passed in the base kernel offset we could have a better |
| 1654 | // alignment than 4, but we don't really need it. |
| 1655 | SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset); |
| 1656 | SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, Align(4), |
| 1657 | MachineMemOperand::MODereferenceable | |
| 1658 | MachineMemOperand::MOInvariant); |
| 1659 | |
| 1660 | SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32); |
    SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
| 1662 | |
| 1663 | SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract); |
| 1664 | ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal); |
| 1665 | ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg); |
| 1666 | |
| 1668 | return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL); |
| 1669 | } |
| 1670 | |
| 1671 | SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset); |
| 1672 | SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Alignment, |
| 1673 | MachineMemOperand::MODereferenceable | |
| 1674 | MachineMemOperand::MOInvariant); |
| 1675 | |
| 1676 | SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg); |
| 1677 | return DAG.getMergeValues({ Val, Load.getValue(1) }, SL); |
| 1678 | } |
| 1679 | |
| 1680 | SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA, |
| 1681 | const SDLoc &SL, SDValue Chain, |
| 1682 | const ISD::InputArg &Arg) const { |
| 1683 | MachineFunction &MF = DAG.getMachineFunction(); |
| 1684 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 1685 | |
| 1686 | if (Arg.Flags.isByVal()) { |
| 1687 | unsigned Size = Arg.Flags.getByValSize(); |
| 1688 | int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false); |
| 1689 | return DAG.getFrameIndex(FrameIdx, MVT::i32); |
| 1690 | } |
| 1691 | |
| 1692 | unsigned ArgOffset = VA.getLocMemOffset(); |
| 1693 | unsigned ArgSize = VA.getValVT().getStoreSize(); |
| 1694 | |
| 1695 | int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true); |
| 1696 | |
| 1697 | // Create load nodes to retrieve arguments from the stack. |
| 1698 | SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); |
| 1699 | SDValue ArgValue; |
| 1700 | |
  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
| 1702 | ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; |
| 1703 | MVT MemVT = VA.getValVT(); |
| 1704 | |
| 1705 | switch (VA.getLocInfo()) { |
| 1706 | default: |
| 1707 | break; |
| 1708 | case CCValAssign::BCvt: |
| 1709 | MemVT = VA.getLocVT(); |
| 1710 | break; |
| 1711 | case CCValAssign::SExt: |
| 1712 | ExtType = ISD::SEXTLOAD; |
| 1713 | break; |
| 1714 | case CCValAssign::ZExt: |
| 1715 | ExtType = ISD::ZEXTLOAD; |
| 1716 | break; |
| 1717 | case CCValAssign::AExt: |
| 1718 | ExtType = ISD::EXTLOAD; |
| 1719 | break; |
| 1720 | } |
| 1721 | |
| 1722 | ArgValue = DAG.getExtLoad( |
| 1723 | ExtType, SL, VA.getLocVT(), Chain, FIN, |
| 1724 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), |
| 1725 | MemVT); |
| 1726 | return ArgValue; |
| 1727 | } |
| 1728 | |
| 1729 | SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG, |
| 1730 | const SIMachineFunctionInfo &MFI, |
| 1731 | EVT VT, |
| 1732 | AMDGPUFunctionArgInfo::PreloadedValue PVID) const { |
| 1733 | const ArgDescriptor *Reg; |
| 1734 | const TargetRegisterClass *RC; |
| 1735 | LLT Ty; |
| 1736 | |
| 1737 | std::tie(Reg, RC, Ty) = MFI.getPreloadedValue(PVID); |
| 1738 | return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT); |
| 1739 | } |
| 1740 | |
| 1741 | static void processPSInputArgs(SmallVectorImpl<ISD::InputArg> &Splits, |
| 1742 | CallingConv::ID CallConv, |
| 1743 | ArrayRef<ISD::InputArg> Ins, BitVector &Skipped, |
| 1744 | FunctionType *FType, |
| 1745 | SIMachineFunctionInfo *Info) { |
| 1746 | for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) { |
| 1747 | const ISD::InputArg *Arg = &Ins[I]; |
| 1748 | |
| 1749 | assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) && |
| 1750 | "vector type argument should have been split" ); |
| 1751 | |
| 1752 | // First check if it's a PS input addr. |
| 1753 | if (CallConv == CallingConv::AMDGPU_PS && |
| 1754 | !Arg->Flags.isInReg() && PSInputNum <= 15) { |
| 1755 | bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum); |
| 1756 | |
| 1757 | // Inconveniently only the first part of the split is marked as isSplit, |
| 1758 | // so skip to the end. We only want to increment PSInputNum once for the |
| 1759 | // entire split argument. |
| 1760 | if (Arg->Flags.isSplit()) { |
| 1761 | while (!Arg->Flags.isSplitEnd()) { |
| 1762 | assert((!Arg->VT.isVector() || |
| 1763 | Arg->VT.getScalarSizeInBits() == 16) && |
| 1764 | "unexpected vector split in ps argument type" ); |
| 1765 | if (!SkipArg) |
| 1766 | Splits.push_back(*Arg); |
| 1767 | Arg = &Ins[++I]; |
| 1768 | } |
| 1769 | } |
| 1770 | |
| 1771 | if (SkipArg) { |
| 1772 | // We can safely skip PS inputs. |
| 1773 | Skipped.set(Arg->getOrigArgIndex()); |
| 1774 | ++PSInputNum; |
| 1775 | continue; |
| 1776 | } |
| 1777 | |
| 1778 | Info->markPSInputAllocated(PSInputNum); |
| 1779 | if (Arg->Used) |
| 1780 | Info->markPSInputEnabled(PSInputNum); |
| 1781 | |
| 1782 | ++PSInputNum; |
| 1783 | } |
| 1784 | |
| 1785 | Splits.push_back(*Arg); |
| 1786 | } |
| 1787 | } |
| 1788 | |
| 1789 | // Allocate special inputs passed in VGPRs. |
| 1790 | void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo, |
| 1791 | MachineFunction &MF, |
| 1792 | const SIRegisterInfo &TRI, |
| 1793 | SIMachineFunctionInfo &Info) const { |
| 1794 | const LLT S32 = LLT::scalar(32); |
| 1795 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 1796 | |
| 1797 | if (Info.hasWorkItemIDX()) { |
| 1798 | Register Reg = AMDGPU::VGPR0; |
| 1799 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); |
| 1800 | |
| 1801 | CCInfo.AllocateReg(Reg); |
| 1802 | Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg)); |
| 1803 | } |
| 1804 | |
| 1805 | if (Info.hasWorkItemIDY()) { |
| 1806 | Register Reg = AMDGPU::VGPR1; |
| 1807 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); |
| 1808 | |
| 1809 | CCInfo.AllocateReg(Reg); |
| 1810 | Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg)); |
| 1811 | } |
| 1812 | |
| 1813 | if (Info.hasWorkItemIDZ()) { |
| 1814 | Register Reg = AMDGPU::VGPR2; |
| 1815 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); |
| 1816 | |
| 1817 | CCInfo.AllocateReg(Reg); |
| 1818 | Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg)); |
| 1819 | } |
| 1820 | } |
| 1821 | |
// Try to allocate a VGPR at the end of the argument list, or if no argument
// VGPRs are left, allocate a stack slot.
// If \p Mask is given, it indicates the bitfield position in the register.
// If \p Arg is given, use it with the new \p Mask instead of allocating a new
// one.
| 1826 | static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u, |
| 1827 | ArgDescriptor Arg = ArgDescriptor()) { |
| 1828 | if (Arg.isSet()) |
| 1829 | return ArgDescriptor::createArg(Arg, Mask); |
| 1830 | |
| 1831 | ArrayRef<MCPhysReg> ArgVGPRs |
| 1832 | = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32); |
| 1833 | unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs); |
| 1834 | if (RegIdx == ArgVGPRs.size()) { |
| 1835 | // Spill to stack required. |
| 1836 | int64_t Offset = CCInfo.AllocateStack(4, Align(4)); |
| 1837 | |
| 1838 | return ArgDescriptor::createStack(Offset, Mask); |
| 1839 | } |
| 1840 | |
| 1841 | unsigned Reg = ArgVGPRs[RegIdx]; |
| 1842 | Reg = CCInfo.AllocateReg(Reg); |
| 1843 | assert(Reg != AMDGPU::NoRegister); |
| 1844 | |
| 1845 | MachineFunction &MF = CCInfo.getMachineFunction(); |
| 1846 | Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); |
| 1847 | MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32)); |
| 1848 | return ArgDescriptor::createRegister(Reg, Mask); |
| 1849 | } |
| 1850 | |
| 1851 | static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo, |
| 1852 | const TargetRegisterClass *RC, |
| 1853 | unsigned NumArgRegs) { |
| 1854 | ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32); |
| 1855 | unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs); |
| 1856 | if (RegIdx == ArgSGPRs.size()) |
| 1857 | report_fatal_error("ran out of SGPRs for arguments" ); |
| 1858 | |
| 1859 | unsigned Reg = ArgSGPRs[RegIdx]; |
| 1860 | Reg = CCInfo.AllocateReg(Reg); |
| 1861 | assert(Reg != AMDGPU::NoRegister); |
| 1862 | |
| 1863 | MachineFunction &MF = CCInfo.getMachineFunction(); |
| 1864 | MF.addLiveIn(Reg, RC); |
| 1865 | return ArgDescriptor::createRegister(Reg); |
| 1866 | } |
| 1867 | |
| 1868 | static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) { |
| 1869 | return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32); |
| 1870 | } |
| 1871 | |
| 1872 | static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) { |
| 1873 | return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16); |
| 1874 | } |
| 1875 | |
| 1876 | /// Allocate implicit function VGPR arguments at the end of allocated user |
| 1877 | /// arguments. |
| 1878 | void SITargetLowering::allocateSpecialInputVGPRs( |
| 1879 | CCState &CCInfo, MachineFunction &MF, |
| 1880 | const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { |
| 1881 | const unsigned Mask = 0x3ff; |
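  // Each workitem ID fits in 10 bits (a workgroup dimension has at most 1024
  // items), so X, Y and Z can share one VGPR at bit offsets 0, 10 and 20.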
| 1882 | ArgDescriptor Arg; |
| 1883 | |
| 1884 | if (Info.hasWorkItemIDX()) { |
| 1885 | Arg = allocateVGPR32Input(CCInfo, Mask); |
| 1886 | Info.setWorkItemIDX(Arg); |
| 1887 | } |
| 1888 | |
| 1889 | if (Info.hasWorkItemIDY()) { |
| 1890 | Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg); |
| 1891 | Info.setWorkItemIDY(Arg); |
| 1892 | } |
| 1893 | |
| 1894 | if (Info.hasWorkItemIDZ()) |
| 1895 | Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg)); |
| 1896 | } |
| 1897 | |
| 1898 | /// Allocate implicit function VGPR arguments in fixed registers. |
| 1899 | void SITargetLowering::allocateSpecialInputVGPRsFixed( |
| 1900 | CCState &CCInfo, MachineFunction &MF, |
| 1901 | const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { |
| 1902 | Register Reg = CCInfo.AllocateReg(AMDGPU::VGPR31); |
| 1903 | if (!Reg) |
| 1904 | report_fatal_error("failed to allocated VGPR for implicit arguments" ); |
| 1905 | |
| 1906 | const unsigned Mask = 0x3ff; |
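  // All three IDs share VGPR31; the register holds (Z << 20) | (Y << 10) | X.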
| 1907 | Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask)); |
| 1908 | Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg, Mask << 10)); |
| 1909 | Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg, Mask << 20)); |
| 1910 | } |
| 1911 | |
| 1912 | void SITargetLowering::allocateSpecialInputSGPRs( |
| 1913 | CCState &CCInfo, |
| 1914 | MachineFunction &MF, |
| 1915 | const SIRegisterInfo &TRI, |
| 1916 | SIMachineFunctionInfo &Info) const { |
| 1917 | auto &ArgInfo = Info.getArgInfo(); |
| 1918 | |
| 1919 | // TODO: Unify handling with private memory pointers. |
| 1920 | |
| 1921 | if (Info.hasDispatchPtr()) |
| 1922 | ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo); |
| 1923 | |
| 1924 | if (Info.hasQueuePtr()) |
| 1925 | ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo); |
| 1926 | |
| 1927 | // Implicit arg ptr takes the place of the kernarg segment pointer. This is a |
| 1928 | // constant offset from the kernarg segment. |
| 1929 | if (Info.hasImplicitArgPtr()) |
| 1930 | ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo); |
| 1931 | |
| 1932 | if (Info.hasDispatchID()) |
| 1933 | ArgInfo.DispatchID = allocateSGPR64Input(CCInfo); |
| 1934 | |
| 1935 | // flat_scratch_init is not applicable for non-kernel functions. |
| 1936 | |
| 1937 | if (Info.hasWorkGroupIDX()) |
| 1938 | ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo); |
| 1939 | |
| 1940 | if (Info.hasWorkGroupIDY()) |
| 1941 | ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo); |
| 1942 | |
| 1943 | if (Info.hasWorkGroupIDZ()) |
| 1944 | ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo); |
| 1945 | } |
| 1946 | |
| 1947 | // Allocate special inputs passed in user SGPRs. |
| 1948 | void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo, |
| 1949 | MachineFunction &MF, |
| 1950 | const SIRegisterInfo &TRI, |
| 1951 | SIMachineFunctionInfo &Info) const { |
| 1952 | if (Info.hasImplicitBufferPtr()) { |
| 1953 | Register ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI); |
| 1954 | MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); |
| 1955 | CCInfo.AllocateReg(ImplicitBufferPtrReg); |
| 1956 | } |
| 1957 | |
| 1958 | // FIXME: How should these inputs interact with inreg / custom SGPR inputs? |
| 1959 | if (Info.hasPrivateSegmentBuffer()) { |
| 1960 | Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); |
| 1961 | MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); |
| 1962 | CCInfo.AllocateReg(PrivateSegmentBufferReg); |
| 1963 | } |
| 1964 | |
| 1965 | if (Info.hasDispatchPtr()) { |
| 1966 | Register DispatchPtrReg = Info.addDispatchPtr(TRI); |
| 1967 | MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); |
| 1968 | CCInfo.AllocateReg(DispatchPtrReg); |
| 1969 | } |
| 1970 | |
| 1971 | if (Info.hasQueuePtr()) { |
| 1972 | Register QueuePtrReg = Info.addQueuePtr(TRI); |
| 1973 | MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); |
| 1974 | CCInfo.AllocateReg(QueuePtrReg); |
| 1975 | } |
| 1976 | |
| 1977 | if (Info.hasKernargSegmentPtr()) { |
| 1978 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 1979 | Register InputPtrReg = Info.addKernargSegmentPtr(TRI); |
| 1980 | CCInfo.AllocateReg(InputPtrReg); |
| 1981 | |
| 1982 | Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); |
| 1983 | MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); |
| 1984 | } |
| 1985 | |
| 1986 | if (Info.hasDispatchID()) { |
| 1987 | Register DispatchIDReg = Info.addDispatchID(TRI); |
| 1988 | MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); |
| 1989 | CCInfo.AllocateReg(DispatchIDReg); |
| 1990 | } |
| 1991 | |
| 1992 | if (Info.hasFlatScratchInit() && !getSubtarget()->isAmdPalOS()) { |
| 1993 | Register FlatScratchInitReg = Info.addFlatScratchInit(TRI); |
| 1994 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); |
| 1995 | CCInfo.AllocateReg(FlatScratchInitReg); |
| 1996 | } |
| 1997 | |
| 1998 | // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read |
| 1999 | // these from the dispatch pointer. |
| 2000 | } |
| 2001 | |
| 2002 | // Allocate special input registers that are initialized per-wave. |
| 2003 | void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo, |
| 2004 | MachineFunction &MF, |
| 2005 | SIMachineFunctionInfo &Info, |
| 2006 | CallingConv::ID CallConv, |
| 2007 | bool IsShader) const { |
| 2008 | if (Info.hasWorkGroupIDX()) { |
| 2009 | Register Reg = Info.addWorkGroupIDX(); |
| 2010 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
| 2011 | CCInfo.AllocateReg(Reg); |
| 2012 | } |
| 2013 | |
| 2014 | if (Info.hasWorkGroupIDY()) { |
| 2015 | Register Reg = Info.addWorkGroupIDY(); |
| 2016 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
| 2017 | CCInfo.AllocateReg(Reg); |
| 2018 | } |
| 2019 | |
| 2020 | if (Info.hasWorkGroupIDZ()) { |
| 2021 | Register Reg = Info.addWorkGroupIDZ(); |
| 2022 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
| 2023 | CCInfo.AllocateReg(Reg); |
| 2024 | } |
| 2025 | |
| 2026 | if (Info.hasWorkGroupInfo()) { |
| 2027 | Register Reg = Info.addWorkGroupInfo(); |
| 2028 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
| 2029 | CCInfo.AllocateReg(Reg); |
| 2030 | } |
| 2031 | |
| 2032 | if (Info.hasPrivateSegmentWaveByteOffset()) { |
| 2033 | // Scratch wave offset passed in system SGPR. |
| 2034 | unsigned PrivateSegmentWaveByteOffsetReg; |
| 2035 | |
| 2036 | if (IsShader) { |
| 2037 | PrivateSegmentWaveByteOffsetReg = |
| 2038 | Info.getPrivateSegmentWaveByteOffsetSystemSGPR(); |
| 2039 | |
| 2040 | // This is true if the scratch wave byte offset doesn't have a fixed |
| 2041 | // location. |
| 2042 | if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) { |
| 2043 | PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); |
| 2044 | Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); |
| 2045 | } |
| 2046 | } else |
| 2047 | PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset(); |
| 2048 | |
| 2049 | MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); |
| 2050 | CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); |
| 2051 | } |
| 2052 | } |
| 2053 | |
| 2054 | static void reservePrivateMemoryRegs(const TargetMachine &TM, |
| 2055 | MachineFunction &MF, |
| 2056 | const SIRegisterInfo &TRI, |
| 2057 | SIMachineFunctionInfo &Info) { |
  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
| 2060 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 2061 | bool HasStackObjects = MFI.hasStackObjects(); |
| 2062 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
| 2063 | |
| 2064 | // Record that we know we have non-spill stack objects so we don't need to |
| 2065 | // check all stack objects later. |
| 2066 | if (HasStackObjects) |
| 2067 | Info.setHasNonSpillStackObjects(true); |
| 2068 | |
| 2069 | // Everything live out of a block is spilled with fast regalloc, so it's |
| 2070 | // almost certain that spilling will be required. |
| 2071 | if (TM.getOptLevel() == CodeGenOpt::None) |
| 2072 | HasStackObjects = true; |
| 2073 | |
| 2074 | // For now assume stack access is needed in any callee functions, so we need |
| 2075 | // the scratch registers to pass in. |
| 2076 | bool RequiresStackAccess = HasStackObjects || MFI.hasCalls(); |
| 2077 | |
| 2078 | if (!ST.enableFlatScratch()) { |
| 2079 | if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) { |
| 2080 | // If we have stack objects, we unquestionably need the private buffer |
| 2081 | // resource. For the Code Object V2 ABI, this will be the first 4 user |
| 2082 | // SGPR inputs. We can reserve those and use them directly. |
| 2083 | |
| 2084 | Register PrivateSegmentBufferReg = |
| 2085 | Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); |
| 2086 | Info.setScratchRSrcReg(PrivateSegmentBufferReg); |
| 2087 | } else { |
| 2088 | unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); |
      // We tentatively reserve the highest available registers (skipping
      // those which may contain VCC, FLAT_SCR, and XNACK). After register
      // allocation, we'll replace these with the registers immediately after
      // those which were really allocated. In the prologue, copies will be
      // inserted from the argument to these reserved registers.
| 2094 | |
| 2095 | // Without HSA, relocations are used for the scratch pointer and the |
| 2096 | // buffer resource setup is always inserted in the prologue. Scratch wave |
| 2097 | // offset is still in an input SGPR. |
| 2098 | Info.setScratchRSrcReg(ReservedBufferReg); |
| 2099 | } |
| 2100 | } |
| 2101 | |
| 2102 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 2103 | |
| 2104 | // For entry functions we have to set up the stack pointer if we use it, |
| 2105 | // whereas non-entry functions get this "for free". This means there is no |
| 2106 | // intrinsic advantage to using S32 over S34 in cases where we do not have |
| 2107 | // calls but do need a frame pointer (i.e. if we are requested to have one |
| 2108 | // because frame pointer elimination is disabled). To keep things simple we |
| 2109 | // only ever use S32 as the call ABI stack pointer, and so using it does not |
| 2110 | // imply we need a separate frame pointer. |
| 2111 | // |
| 2112 | // Try to use s32 as the SP, but move it if it would interfere with input |
| 2113 | // arguments. This won't work with calls though. |
| 2114 | // |
| 2115 | // FIXME: Move SP to avoid any possible inputs, or find a way to spill input |
| 2116 | // registers. |
| 2117 | if (!MRI.isLiveIn(AMDGPU::SGPR32)) { |
| 2118 | Info.setStackPtrOffsetReg(AMDGPU::SGPR32); |
| 2119 | } else { |
| 2120 | assert(AMDGPU::isShader(MF.getFunction().getCallingConv())); |
| 2121 | |
| 2122 | if (MFI.hasCalls()) |
| 2123 | report_fatal_error("call in graphics shader with too many input SGPRs" ); |
| 2124 | |
| 2125 | for (unsigned Reg : AMDGPU::SGPR_32RegClass) { |
| 2126 | if (!MRI.isLiveIn(Reg)) { |
| 2127 | Info.setStackPtrOffsetReg(Reg); |
| 2128 | break; |
| 2129 | } |
| 2130 | } |
| 2131 | |
| 2132 | if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG) |
| 2133 | report_fatal_error("failed to find register for SP" ); |
| 2134 | } |
| 2135 | |
| 2136 | // hasFP should be accurate for entry functions even before the frame is |
| 2137 | // finalized, because it does not rely on the known stack size, only |
| 2138 | // properties like whether variable sized objects are present. |
| 2139 | if (ST.getFrameLowering()->hasFP(MF)) { |
| 2140 | Info.setFrameOffsetReg(AMDGPU::SGPR33); |
| 2141 | } |
| 2142 | } |
| 2143 | |
| 2144 | bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { |
| 2145 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); |
| 2146 | return !Info->isEntryFunction(); |
| 2147 | } |
| 2148 | |
| 2149 | void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { |
| 2150 | |
| 2151 | } |
| 2152 | |
| 2153 | void SITargetLowering::insertCopiesSplitCSR( |
| 2154 | MachineBasicBlock *Entry, |
| 2155 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { |
| 2156 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
| 2157 | |
| 2158 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); |
| 2159 | if (!IStart) |
| 2160 | return; |
| 2161 | |
| 2162 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 2163 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); |
| 2164 | MachineBasicBlock::iterator MBBI = Entry->begin(); |
| 2165 | for (const MCPhysReg *I = IStart; *I; ++I) { |
| 2166 | const TargetRegisterClass *RC = nullptr; |
| 2167 | if (AMDGPU::SReg_64RegClass.contains(*I)) |
| 2168 | RC = &AMDGPU::SGPR_64RegClass; |
| 2169 | else if (AMDGPU::SReg_32RegClass.contains(*I)) |
| 2170 | RC = &AMDGPU::SGPR_32RegClass; |
| 2171 | else |
| 2172 | llvm_unreachable("Unexpected register class in CSRsViaCopy!" ); |
| 2173 | |
| 2174 | Register NewVR = MRI->createVirtualRegister(RC); |
| 2175 | // Create copy from CSR to a virtual register. |
| 2176 | Entry->addLiveIn(*I); |
| 2177 | BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) |
| 2178 | .addReg(*I); |
| 2179 | |
| 2180 | // Insert the copy-back instructions right before the terminator. |
| 2181 | for (auto *Exit : Exits) |
| 2182 | BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), |
| 2183 | TII->get(TargetOpcode::COPY), *I) |
| 2184 | .addReg(NewVR); |
| 2185 | } |
| 2186 | } |
| 2187 | |
| 2188 | SDValue SITargetLowering::LowerFormalArguments( |
| 2189 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 2190 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, |
| 2191 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
| 2192 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
| 2193 | |
| 2194 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2195 | const Function &Fn = MF.getFunction(); |
| 2196 | FunctionType *FType = MF.getFunction().getFunctionType(); |
| 2197 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 2198 | |
| 2199 | if (Subtarget->isAmdHsaOS() && AMDGPU::isGraphics(CallConv)) { |
| 2200 | DiagnosticInfoUnsupported NoGraphicsHSA( |
| 2201 | Fn, "unsupported non-compute shaders with HSA" , DL.getDebugLoc()); |
| 2202 | DAG.getContext()->diagnose(NoGraphicsHSA); |
| 2203 | return DAG.getEntryNode(); |
| 2204 | } |
| 2205 | |
| 2206 | SmallVector<ISD::InputArg, 16> Splits; |
| 2207 | SmallVector<CCValAssign, 16> ArgLocs; |
| 2208 | BitVector Skipped(Ins.size()); |
| 2209 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 2210 | *DAG.getContext()); |
| 2211 | |
| 2212 | bool IsGraphics = AMDGPU::isGraphics(CallConv); |
| 2213 | bool IsKernel = AMDGPU::isKernel(CallConv); |
| 2214 | bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv); |
| 2215 | |
| 2216 | if (IsGraphics) { |
| 2217 | assert(!Info->hasDispatchPtr() && !Info->hasKernargSegmentPtr() && |
| 2218 | (!Info->hasFlatScratchInit() || Subtarget->enableFlatScratch()) && |
| 2219 | !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && |
| 2220 | !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && |
| 2221 | !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && |
| 2222 | !Info->hasWorkItemIDZ()); |
| 2223 | } |
| 2224 | |
| 2225 | if (CallConv == CallingConv::AMDGPU_PS) { |
| 2226 | processPSInputArgs(Splits, CallConv, Ins, Skipped, FType, Info); |
| 2227 | |
| 2228 | // At least one interpolation mode must be enabled or else the GPU will |
| 2229 | // hang. |
| 2230 | // |
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the
    // user set PSInputAddr, the user wants to enable some bits after the
    // compilation based on run-time states. Since we can't know what the
    // final PSInputEna will look like, we shouldn't do anything here and the
    // user should take responsibility for the correct programming.
| 2236 | // |
| 2237 | // Otherwise, the following restrictions apply: |
| 2238 | // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. |
| 2239 | // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be |
| 2240 | // enabled too. |
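    //
    // (In PSInputAddr, PERSP_* occupy bits 0-3, LINEAR_* bits 4-6, and
    // POS_W_FLOAT is bit 11.)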
| 2241 | if ((Info->getPSInputAddr() & 0x7F) == 0 || |
| 2242 | ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11))) { |
| 2243 | CCInfo.AllocateReg(AMDGPU::VGPR0); |
| 2244 | CCInfo.AllocateReg(AMDGPU::VGPR1); |
| 2245 | Info->markPSInputAllocated(0); |
| 2246 | Info->markPSInputEnabled(0); |
| 2247 | } |
| 2248 | if (Subtarget->isAmdPalOS()) { |
| 2249 | // For isAmdPalOS, the user does not enable some bits after compilation |
| 2250 | // based on run-time states; the register values being generated here are |
| 2251 | // the final ones set in hardware. Therefore we need to apply the |
| 2252 | // workaround to PSInputAddr and PSInputEnable together. (The case where |
| 2253 | // a bit is set in PSInputAddr but not PSInputEnable is where the |
| 2254 | // frontend set up an input arg for a particular interpolation mode, but |
| 2255 | // nothing uses that input arg. Really we should have an earlier pass |
| 2256 | // that removes such an arg.) |
| 2257 | unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); |
| 2258 | if ((PsInputBits & 0x7F) == 0 || |
| 2259 | ((PsInputBits & 0xF) == 0 && (PsInputBits >> 11 & 1))) |
| 2260 | Info->markPSInputEnabled( |
| 2261 | countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); |
| 2262 | } |
| 2263 | } else if (IsKernel) { |
| 2264 | assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); |
| 2265 | } else { |
| 2266 | Splits.append(Ins.begin(), Ins.end()); |
| 2267 | } |
| 2268 | |
| 2269 | if (IsEntryFunc) { |
| 2270 | allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); |
| 2271 | allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); |
| 2272 | } else { |
| 2273 | // For the fixed ABI, pass workitem IDs in the last argument register. |
| 2274 | if (AMDGPUTargetMachine::EnableFixedFunctionABI) |
| 2275 | allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info); |
| 2276 | } |
| 2277 | |
| 2278 | if (IsKernel) { |
| 2279 | analyzeFormalArgumentsCompute(CCInfo, Ins); |
| 2280 | } else { |
| 2281 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); |
| 2282 | CCInfo.AnalyzeFormalArguments(Splits, AssignFn); |
| 2283 | } |
| 2284 | |
| 2285 | SmallVector<SDValue, 16> Chains; |
| 2286 | |
| 2287 | // FIXME: This is the minimum kernel argument alignment. We should improve |
| 2288 | // this to the maximum alignment of the arguments. |
| 2289 | // |
| 2290 | // FIXME: Alignment of explicit arguments totally broken with non-0 explicit |
| 2291 | // kern arg offset. |
| 2292 | const Align KernelArgBaseAlign = Align(16); |
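  //
  // For example: with the 16-byte base alignment, an argument at byte offset
  // 8 gets commonAlignment(16, 8) = Align(8), while one at offset 4 gets
  // Align(4).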
| 2293 | |
| 2294 | for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { |
| 2295 | const ISD::InputArg &Arg = Ins[i]; |
| 2296 | if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) { |
| 2297 | InVals.push_back(DAG.getUNDEF(Arg.VT)); |
| 2298 | continue; |
| 2299 | } |
| 2300 | |
| 2301 | CCValAssign &VA = ArgLocs[ArgIdx++]; |
| 2302 | MVT VT = VA.getLocVT(); |
| 2303 | |
| 2304 | if (IsEntryFunc && VA.isMemLoc()) { |
| 2305 | VT = Ins[i].VT; |
| 2306 | EVT MemVT = VA.getLocVT(); |
| 2307 | |
| 2308 | const uint64_t Offset = VA.getLocMemOffset(); |
| 2309 | Align Alignment = commonAlignment(KernelArgBaseAlign, Offset); |
| 2310 | |
| 2311 | if (Arg.Flags.isByRef()) { |
| 2312 | SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, Chain, Offset); |
| 2313 | |
| 2314 | const GCNTargetMachine &TM = |
| 2315 | static_cast<const GCNTargetMachine &>(getTargetMachine()); |
| 2316 | if (!TM.isNoopAddrSpaceCast(AMDGPUAS::CONSTANT_ADDRESS, |
| 2317 | Arg.Flags.getPointerAddrSpace())) { |
| 2318 | Ptr = DAG.getAddrSpaceCast(DL, VT, Ptr, AMDGPUAS::CONSTANT_ADDRESS, |
| 2319 | Arg.Flags.getPointerAddrSpace()); |
| 2320 | } |
| 2321 | |
| 2322 | InVals.push_back(Ptr); |
| 2323 | continue; |
| 2324 | } |
| 2325 | |
| 2326 | SDValue Arg = lowerKernargMemParameter( |
| 2327 | DAG, VT, MemVT, DL, Chain, Offset, Alignment, Ins[i].Flags.isSExt(), &Ins[i]); |
| 2328 | Chains.push_back(Arg.getValue(1)); |
| 2329 | |
| 2330 | auto *ParamTy = |
| 2331 | dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); |
| 2332 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && |
| 2333 | ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || |
| 2334 | ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) { |
| 2335 | // On SI local pointers are just offsets into LDS, so they are always |
| 2336 | // less than 16-bits. On CI and newer they could potentially be |
| 2337 | // real pointers, so we can't guarantee their size. |
| 2338 | Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, |
| 2339 | DAG.getValueType(MVT::i16)); |
| 2340 | } |
| 2341 | |
| 2342 | InVals.push_back(Arg); |
| 2343 | continue; |
| 2344 | } else if (!IsEntryFunc && VA.isMemLoc()) { |
| 2345 | SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); |
| 2346 | InVals.push_back(Val); |
| 2347 | if (!Arg.Flags.isByVal()) |
| 2348 | Chains.push_back(Val.getValue(1)); |
| 2349 | continue; |
| 2350 | } |
| 2351 | |
| 2352 | assert(VA.isRegLoc() && "Parameter must be in a register!" ); |
| 2353 | |
| 2354 | Register Reg = VA.getLocReg(); |
| 2355 | const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); |
| 2356 | EVT ValVT = VA.getValVT(); |
| 2357 | |
| 2358 | Reg = MF.addLiveIn(Reg, RC); |
| 2359 | SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); |
| 2360 | |
| 2361 | if (Arg.Flags.isSRet()) { |
| 2362 | // The return object should be reasonably addressable. |
| 2363 | |
      // FIXME: This helps when the return is a real sret. If it is an
      // automatically inserted sret (i.e. CanLowerReturn returns false), an
      // extra copy is inserted in SelectionDAGBuilder which obscures this.
| 2367 | unsigned NumBits |
| 2368 | = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex(); |
| 2369 | Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, |
| 2370 | DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits))); |
| 2371 | } |
| 2372 | |
| 2373 | // If this is an 8 or 16-bit value, it is really passed promoted |
| 2374 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
| 2375 | // truncate to the right size. |
| 2376 | switch (VA.getLocInfo()) { |
| 2377 | case CCValAssign::Full: |
| 2378 | break; |
| 2379 | case CCValAssign::BCvt: |
| 2380 | Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val); |
| 2381 | break; |
| 2382 | case CCValAssign::SExt: |
| 2383 | Val = DAG.getNode(ISD::AssertSext, DL, VT, Val, |
| 2384 | DAG.getValueType(ValVT)); |
| 2385 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); |
| 2386 | break; |
| 2387 | case CCValAssign::ZExt: |
| 2388 | Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, |
| 2389 | DAG.getValueType(ValVT)); |
| 2390 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); |
| 2391 | break; |
| 2392 | case CCValAssign::AExt: |
| 2393 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); |
| 2394 | break; |
| 2395 | default: |
| 2396 | llvm_unreachable("Unknown loc info!" ); |
| 2397 | } |
| 2398 | |
| 2399 | InVals.push_back(Val); |
| 2400 | } |
| 2401 | |
| 2402 | if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) { |
| 2403 | // Special inputs come after user arguments. |
| 2404 | allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info); |
| 2405 | } |
| 2406 | |
| 2407 | // Start adding system SGPRs. |
| 2408 | if (IsEntryFunc) { |
| 2409 | allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsGraphics); |
| 2410 | } else { |
| 2411 | CCInfo.AllocateReg(Info->getScratchRSrcReg()); |
| 2412 | allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); |
| 2413 | } |
| 2414 | |
| 2415 | auto &ArgUsageInfo = |
| 2416 | DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); |
| 2417 | ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo()); |
| 2418 | |
| 2419 | unsigned StackArgSize = CCInfo.getNextStackOffset(); |
| 2420 | Info->setBytesInStackArgArea(StackArgSize); |
| 2421 | |
| 2422 | return Chains.empty() ? Chain : |
| 2423 | DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
| 2424 | } |
| 2425 | |
| 2426 | // TODO: If return values can't fit in registers, we should return as many as |
| 2427 | // possible in registers before passing on stack. |
| 2428 | bool SITargetLowering::CanLowerReturn( |
| 2429 | CallingConv::ID CallConv, |
| 2430 | MachineFunction &MF, bool IsVarArg, |
| 2431 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2432 | LLVMContext &Context) const { |
| 2433 | // Replacing returns with sret/stack usage doesn't make sense for shaders. |
| 2434 | // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn |
| 2435 | // for shaders. Vector types should be explicitly handled by CC. |
| 2436 | if (AMDGPU::isEntryFunctionCC(CallConv)) |
| 2437 | return true; |
| 2438 | |
| 2439 | SmallVector<CCValAssign, 16> RVLocs; |
| 2440 | CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); |
| 2441 | return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); |
| 2442 | } |
| 2443 | |
| 2444 | SDValue |
| 2445 | SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
| 2446 | bool isVarArg, |
| 2447 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2448 | const SmallVectorImpl<SDValue> &OutVals, |
| 2449 | const SDLoc &DL, SelectionDAG &DAG) const { |
| 2450 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2451 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 2452 | |
| 2453 | if (AMDGPU::isKernel(CallConv)) { |
| 2454 | return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, |
| 2455 | OutVals, DL, DAG); |
| 2456 | } |
| 2457 | |
| 2458 | bool IsShader = AMDGPU::isShader(CallConv); |
| 2459 | |
| 2460 | Info->setIfReturnsVoid(Outs.empty()); |
| 2461 | bool IsWaveEnd = Info->returnsVoid() && IsShader; |
| 2462 | |
| 2463 | // CCValAssign - represent the assignment of the return value to a location. |
| 2464 | SmallVector<CCValAssign, 48> RVLocs; |
| 2465 | SmallVector<ISD::OutputArg, 48> Splits; |
| 2466 | |
| 2467 | // CCState - Info about the registers and stack slots. |
| 2468 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 2469 | *DAG.getContext()); |
| 2470 | |
| 2471 | // Analyze outgoing return values. |
| 2472 | CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); |
| 2473 | |
| 2474 | SDValue Flag; |
| 2475 | SmallVector<SDValue, 48> RetOps; |
| 2476 | RetOps.push_back(Chain); // Operand #0 = Chain (updated below) |
| 2477 | |
| 2478 | // Add return address for callable functions. |
| 2479 | if (!Info->isEntryFunction()) { |
| 2480 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
| 2481 | SDValue ReturnAddrReg = CreateLiveInRegister( |
| 2482 | DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); |
| 2483 | |
| 2484 | SDValue ReturnAddrVirtualReg = DAG.getRegister( |
| 2485 | MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass), |
| 2486 | MVT::i64); |
| 2487 | Chain = |
| 2488 | DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag); |
| 2489 | Flag = Chain.getValue(1); |
| 2490 | RetOps.push_back(ReturnAddrVirtualReg); |
| 2491 | } |
| 2492 | |
| 2493 | // Copy the result values into the output registers. |
| 2494 | for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E; |
| 2495 | ++I, ++RealRVLocIdx) { |
| 2496 | CCValAssign &VA = RVLocs[I]; |
| 2497 | assert(VA.isRegLoc() && "Can only return in registers!" ); |
| 2498 | // TODO: Partially return in registers if return values don't fit. |
| 2499 | SDValue Arg = OutVals[RealRVLocIdx]; |
| 2500 | |
| 2501 | // Copied from other backends. |
| 2502 | switch (VA.getLocInfo()) { |
| 2503 | case CCValAssign::Full: |
| 2504 | break; |
| 2505 | case CCValAssign::BCvt: |
| 2506 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); |
| 2507 | break; |
| 2508 | case CCValAssign::SExt: |
| 2509 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); |
| 2510 | break; |
| 2511 | case CCValAssign::ZExt: |
| 2512 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); |
| 2513 | break; |
| 2514 | case CCValAssign::AExt: |
| 2515 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); |
| 2516 | break; |
| 2517 | default: |
| 2518 | llvm_unreachable("Unknown loc info!" ); |
| 2519 | } |
| 2520 | |
| 2521 | Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); |
| 2522 | Flag = Chain.getValue(1); |
| 2523 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
| 2524 | } |
| 2525 | |
| 2526 | // FIXME: Does sret work properly? |
| 2527 | if (!Info->isEntryFunction()) { |
| 2528 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 2529 | const MCPhysReg *I = |
| 2530 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); |
| 2531 | if (I) { |
| 2532 | for (; *I; ++I) { |
| 2533 | if (AMDGPU::SReg_64RegClass.contains(*I)) |
| 2534 | RetOps.push_back(DAG.getRegister(*I, MVT::i64)); |
| 2535 | else if (AMDGPU::SReg_32RegClass.contains(*I)) |
| 2536 | RetOps.push_back(DAG.getRegister(*I, MVT::i32)); |
| 2537 | else |
          llvm_unreachable("Unexpected register class in CSRsViaCopy!");
| 2539 | } |
| 2540 | } |
| 2541 | } |
| 2542 | |
| 2543 | // Update chain and glue. |
| 2544 | RetOps[0] = Chain; |
| 2545 | if (Flag.getNode()) |
| 2546 | RetOps.push_back(Flag); |
| 2547 | |
| 2548 | unsigned Opc = AMDGPUISD::ENDPGM; |
| 2549 | if (!IsWaveEnd) |
| 2550 | Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; |
| 2551 | return DAG.getNode(Opc, DL, MVT::Other, RetOps); |
| 2552 | } |
| 2553 | |
| 2554 | SDValue SITargetLowering::LowerCallResult( |
| 2555 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, |
| 2556 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, |
| 2557 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, |
| 2558 | SDValue ThisVal) const { |
| 2559 | CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); |
| 2560 | |
| 2561 | // Assign locations to each value returned by this call. |
| 2562 | SmallVector<CCValAssign, 16> RVLocs; |
| 2563 | CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, |
| 2564 | *DAG.getContext()); |
| 2565 | CCInfo.AnalyzeCallResult(Ins, RetCC); |
| 2566 | |
| 2567 | // Copy all of the result registers out of their specified physreg. |
| 2568 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
| 2569 | CCValAssign VA = RVLocs[i]; |
| 2570 | SDValue Val; |
| 2571 | |
| 2572 | if (VA.isRegLoc()) { |
      Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
| 2574 | Chain = Val.getValue(1); |
| 2575 | InFlag = Val.getValue(2); |
| 2576 | } else if (VA.isMemLoc()) { |
      report_fatal_error("TODO: return values in memory");
| 2578 | } else |
      llvm_unreachable("unknown argument location type");
| 2580 | |
| 2581 | switch (VA.getLocInfo()) { |
| 2582 | case CCValAssign::Full: |
| 2583 | break; |
| 2584 | case CCValAssign::BCvt: |
| 2585 | Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); |
| 2586 | break; |
| 2587 | case CCValAssign::ZExt: |
| 2588 | Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, |
| 2589 | DAG.getValueType(VA.getValVT())); |
| 2590 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); |
| 2591 | break; |
| 2592 | case CCValAssign::SExt: |
| 2593 | Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, |
| 2594 | DAG.getValueType(VA.getValVT())); |
| 2595 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); |
| 2596 | break; |
| 2597 | case CCValAssign::AExt: |
| 2598 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); |
| 2599 | break; |
| 2600 | default: |
      llvm_unreachable("Unknown loc info!");
| 2602 | } |
| 2603 | |
| 2604 | InVals.push_back(Val); |
| 2605 | } |
| 2606 | |
| 2607 | return Chain; |
| 2608 | } |
| 2609 | |
// Add code to pass special inputs that are required depending on the features
// in use, separate from the explicit user arguments present in the IR.
| 2612 | void SITargetLowering::passSpecialInputs( |
| 2613 | CallLoweringInfo &CLI, |
| 2614 | CCState &CCInfo, |
| 2615 | const SIMachineFunctionInfo &Info, |
| 2616 | SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, |
| 2617 | SmallVectorImpl<SDValue> &MemOpChains, |
| 2618 | SDValue Chain) const { |
| 2619 | // If we don't have a call site, this was a call inserted by |
| 2620 | // legalization. These can never use special inputs. |
| 2621 | if (!CLI.CB) |
| 2622 | return; |
| 2623 | |
| 2624 | SelectionDAG &DAG = CLI.DAG; |
| 2625 | const SDLoc &DL = CLI.DL; |
| 2626 | |
| 2627 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 2628 | const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); |
| 2629 | |
| 2630 | const AMDGPUFunctionArgInfo *CalleeArgInfo |
| 2631 | = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo; |
| 2632 | if (const Function *CalleeFunc = CLI.CB->getCalledFunction()) { |
| 2633 | auto &ArgUsageInfo = |
| 2634 | DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); |
| 2635 | CalleeArgInfo = &ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); |
| 2636 | } |
| 2637 | |
| 2638 | // TODO: Unify with private memory register handling. This is complicated by |
| 2639 | // the fact that at least in kernels, the input argument is not necessarily |
| 2640 | // in the same location as the input. |
| 2641 | AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { |
| 2642 | AMDGPUFunctionArgInfo::DISPATCH_PTR, |
| 2643 | AMDGPUFunctionArgInfo::QUEUE_PTR, |
| 2644 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR, |
| 2645 | AMDGPUFunctionArgInfo::DISPATCH_ID, |
| 2646 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X, |
| 2647 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, |
| 2648 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z |
| 2649 | }; |
| 2650 | |
| 2651 | for (auto InputID : InputRegs) { |
| 2652 | const ArgDescriptor *OutgoingArg; |
| 2653 | const TargetRegisterClass *ArgRC; |
| 2654 | LLT ArgTy; |
| 2655 | |
| 2656 | std::tie(OutgoingArg, ArgRC, ArgTy) = |
| 2657 | CalleeArgInfo->getPreloadedValue(InputID); |
| 2658 | if (!OutgoingArg) |
| 2659 | continue; |
| 2660 | |
| 2661 | const ArgDescriptor *IncomingArg; |
| 2662 | const TargetRegisterClass *IncomingArgRC; |
| 2663 | LLT Ty; |
| 2664 | std::tie(IncomingArg, IncomingArgRC, Ty) = |
| 2665 | CallerArgInfo.getPreloadedValue(InputID); |
| 2666 | assert(IncomingArgRC == ArgRC); |
| 2667 | |
| 2668 | // All special arguments are ints for now. |
| 2669 | EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; |
| 2670 | SDValue InputReg; |
| 2671 | |
| 2672 | if (IncomingArg) { |
| 2673 | InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); |
| 2674 | } else { |
| 2675 | // The implicit arg ptr is special because it doesn't have a corresponding |
| 2676 | // input for kernels, and is computed from the kernarg segment pointer. |
| 2677 | assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); |
| 2678 | InputReg = getImplicitArgPtr(DAG, DL); |
| 2679 | } |
| 2680 | |
| 2681 | if (OutgoingArg->isRegister()) { |
| 2682 | RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); |
| 2683 | if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) |
        report_fatal_error("failed to allocate implicit input argument");
| 2685 | } else { |
| 2686 | unsigned SpecialArgOffset = |
| 2687 | CCInfo.AllocateStack(ArgVT.getStoreSize(), Align(4)); |
| 2688 | SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, |
| 2689 | SpecialArgOffset); |
| 2690 | MemOpChains.push_back(ArgStore); |
| 2691 | } |
| 2692 | } |
| 2693 | |
  // Pack the workitem IDs into a single register, or pass them as-is if they
  // are already packed.
| 2696 | const ArgDescriptor *OutgoingArg; |
| 2697 | const TargetRegisterClass *ArgRC; |
| 2698 | LLT Ty; |
| 2699 | |
| 2700 | std::tie(OutgoingArg, ArgRC, Ty) = |
| 2701 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); |
| 2702 | if (!OutgoingArg) |
| 2703 | std::tie(OutgoingArg, ArgRC, Ty) = |
| 2704 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); |
| 2705 | if (!OutgoingArg) |
| 2706 | std::tie(OutgoingArg, ArgRC, Ty) = |
| 2707 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); |
| 2708 | if (!OutgoingArg) |
| 2709 | return; |
| 2710 | |
| 2711 | const ArgDescriptor *IncomingArgX = std::get<0>( |
| 2712 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X)); |
| 2713 | const ArgDescriptor *IncomingArgY = std::get<0>( |
| 2714 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y)); |
| 2715 | const ArgDescriptor *IncomingArgZ = std::get<0>( |
| 2716 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z)); |
| 2717 | |
| 2718 | SDValue InputReg; |
| 2719 | SDLoc SL; |
| 2720 | |
  // If the incoming IDs are not packed, we need to pack them.
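  // The packed layout places X in bits [9:0], Y in bits [19:10] and Z in
  // bits [29:20] of a single 32-bit register, hence the shifts by 10 and 20
  // below.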
| 2722 | if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX) |
| 2723 | InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX); |
| 2724 | |
| 2725 | if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY) { |
| 2726 | SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY); |
| 2727 | Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y, |
| 2728 | DAG.getShiftAmountConstant(10, MVT::i32, SL)); |
| 2729 | InputReg = InputReg.getNode() ? |
| 2730 | DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y; |
| 2731 | } |
| 2732 | |
| 2733 | if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ) { |
| 2734 | SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ); |
| 2735 | Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z, |
| 2736 | DAG.getShiftAmountConstant(20, MVT::i32, SL)); |
| 2737 | InputReg = InputReg.getNode() ? |
| 2738 | DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z; |
| 2739 | } |
| 2740 | |
| 2741 | if (!InputReg.getNode()) { |
    // The workitem IDs are already packed; any present incoming argument
    // will carry all the required fields.
| 2744 | ArgDescriptor IncomingArg = ArgDescriptor::createArg( |
| 2745 | IncomingArgX ? *IncomingArgX : |
| 2746 | IncomingArgY ? *IncomingArgY : |
| 2747 | *IncomingArgZ, ~0u); |
| 2748 | InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg); |
| 2749 | } |
| 2750 | |
| 2751 | if (OutgoingArg->isRegister()) { |
| 2752 | RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); |
| 2753 | CCInfo.AllocateReg(OutgoingArg->getRegister()); |
| 2754 | } else { |
| 2755 | unsigned SpecialArgOffset = CCInfo.AllocateStack(4, Align(4)); |
| 2756 | SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, |
| 2757 | SpecialArgOffset); |
| 2758 | MemOpChains.push_back(ArgStore); |
| 2759 | } |
| 2760 | } |
| 2761 | |
| 2762 | static bool canGuaranteeTCO(CallingConv::ID CC) { |
| 2763 | return CC == CallingConv::Fast; |
| 2764 | } |
| 2765 | |
| 2766 | /// Return true if we might ever do TCO for calls with this calling convention. |
| 2767 | static bool mayTailCallThisCC(CallingConv::ID CC) { |
| 2768 | switch (CC) { |
| 2769 | case CallingConv::C: |
| 2770 | return true; |
| 2771 | default: |
| 2772 | return canGuaranteeTCO(CC); |
| 2773 | } |
| 2774 | } |
| 2775 | |
| 2776 | bool SITargetLowering::isEligibleForTailCallOptimization( |
| 2777 | SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, |
| 2778 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2779 | const SmallVectorImpl<SDValue> &OutVals, |
| 2780 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { |
| 2781 | if (!mayTailCallThisCC(CalleeCC)) |
| 2782 | return false; |
| 2783 | |
| 2784 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2785 | const Function &CallerF = MF.getFunction(); |
| 2786 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
| 2787 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
| 2788 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
| 2789 | |
  // Kernels aren't callable, and don't have a live-in return address, so it
  // doesn't make sense to do a tail call with entry functions.
| 2792 | if (!CallerPreserved) |
| 2793 | return false; |
| 2794 | |
| 2795 | bool CCMatch = CallerCC == CalleeCC; |
| 2796 | |
| 2797 | if (DAG.getTarget().Options.GuaranteedTailCallOpt) { |
| 2798 | if (canGuaranteeTCO(CalleeCC) && CCMatch) |
| 2799 | return true; |
| 2800 | return false; |
| 2801 | } |
| 2802 | |
| 2803 | // TODO: Can we handle var args? |
| 2804 | if (IsVarArg) |
| 2805 | return false; |
| 2806 | |
| 2807 | for (const Argument &Arg : CallerF.args()) { |
| 2808 | if (Arg.hasByValAttr()) |
| 2809 | return false; |
| 2810 | } |
| 2811 | |
| 2812 | LLVMContext &Ctx = *DAG.getContext(); |
| 2813 | |
| 2814 | // Check that the call results are passed in the same way. |
| 2815 | if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, |
| 2816 | CCAssignFnForCall(CalleeCC, IsVarArg), |
| 2817 | CCAssignFnForCall(CallerCC, IsVarArg))) |
| 2818 | return false; |
| 2819 | |
| 2820 | // The callee has to preserve all registers the caller needs to preserve. |
| 2821 | if (!CCMatch) { |
| 2822 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
| 2823 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) |
| 2824 | return false; |
| 2825 | } |
| 2826 | |
| 2827 | // Nothing more to check if the callee is taking no arguments. |
| 2828 | if (Outs.empty()) |
| 2829 | return true; |
| 2830 | |
| 2831 | SmallVector<CCValAssign, 16> ArgLocs; |
| 2832 | CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); |
| 2833 | |
| 2834 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); |
| 2835 | |
| 2836 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); |
  // If the stack arguments for this call do not fit into our own save area,
  // then the call cannot be made a tail call.
| 2839 | // TODO: Is this really necessary? |
| 2840 | if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) |
| 2841 | return false; |
| 2842 | |
| 2843 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 2844 | return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); |
| 2845 | } |
| 2846 | |
| 2847 | bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
| 2848 | if (!CI->isTailCall()) |
| 2849 | return false; |
| 2850 | |
| 2851 | const Function *ParentFn = CI->getParent()->getParent(); |
| 2852 | if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) |
| 2853 | return false; |
| 2854 | return true; |
| 2855 | } |
| 2856 | |
| 2857 | // The wave scratch offset register is used as the global base pointer. |
| 2858 | SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, |
| 2859 | SmallVectorImpl<SDValue> &InVals) const { |
| 2860 | SelectionDAG &DAG = CLI.DAG; |
| 2861 | const SDLoc &DL = CLI.DL; |
| 2862 | SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; |
| 2863 | SmallVector<SDValue, 32> &OutVals = CLI.OutVals; |
| 2864 | SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; |
| 2865 | SDValue Chain = CLI.Chain; |
| 2866 | SDValue Callee = CLI.Callee; |
| 2867 | bool &IsTailCall = CLI.IsTailCall; |
| 2868 | CallingConv::ID CallConv = CLI.CallConv; |
| 2869 | bool IsVarArg = CLI.IsVarArg; |
| 2870 | bool IsSibCall = false; |
| 2871 | bool IsThisReturn = false; |
| 2872 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2873 | |
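  // Calls to an undefined or null target are meaningless; just produce undef
  // values for any results and leave the chain unchanged.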
| 2874 | if (Callee.isUndef() || isNullConstant(Callee)) { |
| 2875 | if (!CLI.IsTailCall) { |
| 2876 | for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I) |
| 2877 | InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT)); |
| 2878 | } |
| 2879 | |
| 2880 | return Chain; |
| 2881 | } |
| 2882 | |
| 2883 | if (IsVarArg) { |
    return lowerUnhandledCall(CLI, InVals,
                              "unsupported call to variadic function ");
| 2886 | } |
| 2887 | |
| 2888 | if (!CLI.CB) |
    report_fatal_error("unsupported libcall legalization");
| 2890 | |
| 2891 | if (!AMDGPUTargetMachine::EnableFixedFunctionABI && |
| 2892 | !CLI.CB->getCalledFunction() && CallConv != CallingConv::AMDGPU_Gfx) { |
    return lowerUnhandledCall(CLI, InVals,
                              "unsupported indirect call to function ");
| 2895 | } |
| 2896 | |
| 2897 | if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { |
    return lowerUnhandledCall(CLI, InVals,
                              "unsupported required tail call to function ");
| 2900 | } |
| 2901 | |
| 2902 | if (AMDGPU::isShader(CallConv)) { |
| 2903 | // Note the issue is with the CC of the called function, not of the call |
| 2904 | // itself. |
    return lowerUnhandledCall(CLI, InVals,
                              "unsupported call to a shader function ");
| 2907 | } |
| 2908 | |
| 2909 | if (AMDGPU::isShader(MF.getFunction().getCallingConv()) && |
| 2910 | CallConv != CallingConv::AMDGPU_Gfx) { |
| 2911 | // Only allow calls with specific calling conventions. |
    return lowerUnhandledCall(CLI, InVals,
                              "unsupported calling convention for call from "
                              "graphics shader of function ");
| 2915 | } |
| 2916 | |
| 2917 | if (IsTailCall) { |
| 2918 | IsTailCall = isEligibleForTailCallOptimization( |
| 2919 | Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); |
| 2920 | if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall()) { |
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");
| 2923 | } |
| 2924 | |
| 2925 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; |
| 2926 | |
    // A sibling call is one where we're under the usual C ABI and not
    // planning to change that, but can still do a tail call.
| 2929 | if (!TailCallOpt && IsTailCall) |
| 2930 | IsSibCall = true; |
| 2931 | |
| 2932 | if (IsTailCall) |
| 2933 | ++NumTailCalls; |
| 2934 | } |
| 2935 | |
| 2936 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 2937 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; |
| 2938 | SmallVector<SDValue, 8> MemOpChains; |
| 2939 | |
| 2940 | // Analyze operands of the call, assigning locations to each operand. |
| 2941 | SmallVector<CCValAssign, 16> ArgLocs; |
| 2942 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); |
| 2943 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); |
| 2944 | |
| 2945 | if (AMDGPUTargetMachine::EnableFixedFunctionABI && |
| 2946 | CallConv != CallingConv::AMDGPU_Gfx) { |
| 2947 | // With a fixed ABI, allocate fixed registers before user arguments. |
| 2948 | passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); |
| 2949 | } |
| 2950 | |
| 2951 | CCInfo.AnalyzeCallOperands(Outs, AssignFn); |
| 2952 | |
| 2953 | // Get a count of how many bytes are to be pushed on the stack. |
| 2954 | unsigned NumBytes = CCInfo.getNextStackOffset(); |
| 2955 | |
| 2956 | if (IsSibCall) { |
| 2957 | // Since we're not changing the ABI to make this a tail call, the memory |
| 2958 | // operands are already available in the caller's incoming argument space. |
| 2959 | NumBytes = 0; |
| 2960 | } |
| 2961 | |
| 2962 | // FPDiff is the byte offset of the call's argument area from the callee's. |
| 2963 | // Stores to callee stack arguments will be placed in FixedStackSlots offset |
| 2964 | // by this amount for a tail call. In a sibling call it must be 0 because the |
| 2965 | // caller will deallocate the entire stack and the callee still expects its |
| 2966 | // arguments to begin at SP+0. Completely unused for non-tail calls. |
| 2967 | int32_t FPDiff = 0; |
| 2968 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 2969 | |
  // Adjust the stack pointer for the new arguments. These operations are
  // automatically eliminated by the prolog/epilog pass.
| 2972 | if (!IsSibCall) { |
| 2973 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); |
| 2974 | |
| 2975 | if (!Subtarget->enableFlatScratch()) { |
| 2976 | SmallVector<SDValue, 4> CopyFromChains; |
| 2977 | |
| 2978 | // In the HSA case, this should be an identity copy. |
| 2979 | SDValue ScratchRSrcReg |
| 2980 | = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); |
| 2981 | RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); |
| 2982 | CopyFromChains.push_back(ScratchRSrcReg.getValue(1)); |
| 2983 | Chain = DAG.getTokenFactor(DL, CopyFromChains); |
| 2984 | } |
| 2985 | } |
| 2986 | |
| 2987 | MVT PtrVT = MVT::i32; |
| 2988 | |
| 2989 | // Walk the register/memloc assignments, inserting copies/loads. |
| 2990 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
| 2991 | CCValAssign &VA = ArgLocs[i]; |
| 2992 | SDValue Arg = OutVals[i]; |
| 2993 | |
| 2994 | // Promote the value if needed. |
| 2995 | switch (VA.getLocInfo()) { |
| 2996 | case CCValAssign::Full: |
| 2997 | break; |
| 2998 | case CCValAssign::BCvt: |
| 2999 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); |
| 3000 | break; |
| 3001 | case CCValAssign::ZExt: |
| 3002 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); |
| 3003 | break; |
| 3004 | case CCValAssign::SExt: |
| 3005 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); |
| 3006 | break; |
| 3007 | case CCValAssign::AExt: |
| 3008 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); |
| 3009 | break; |
| 3010 | case CCValAssign::FPExt: |
| 3011 | Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); |
| 3012 | break; |
| 3013 | default: |
      llvm_unreachable("Unknown loc info!");
| 3015 | } |
| 3016 | |
| 3017 | if (VA.isRegLoc()) { |
| 3018 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); |
| 3019 | } else { |
| 3020 | assert(VA.isMemLoc()); |
| 3021 | |
| 3022 | SDValue DstAddr; |
| 3023 | MachinePointerInfo DstInfo; |
| 3024 | |
| 3025 | unsigned LocMemOffset = VA.getLocMemOffset(); |
| 3026 | int32_t Offset = LocMemOffset; |
| 3027 | |
| 3028 | SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT); |
| 3029 | MaybeAlign Alignment; |
| 3030 | |
| 3031 | if (IsTailCall) { |
| 3032 | ISD::ArgFlagsTy Flags = Outs[i].Flags; |
| 3033 | unsigned OpSize = Flags.isByVal() ? |
| 3034 | Flags.getByValSize() : VA.getValVT().getStoreSize(); |
| 3035 | |
        // FIXME: We can do better than the minimum required byval alignment.
| 3037 | Alignment = |
| 3038 | Flags.isByVal() |
| 3039 | ? Flags.getNonZeroByValAlign() |
| 3040 | : commonAlignment(Subtarget->getStackAlignment(), Offset); |
| 3041 | |
| 3042 | Offset = Offset + FPDiff; |
| 3043 | int FI = MFI.CreateFixedObject(OpSize, Offset, true); |
| 3044 | |
| 3045 | DstAddr = DAG.getFrameIndex(FI, PtrVT); |
| 3046 | DstInfo = MachinePointerInfo::getFixedStack(MF, FI); |
| 3047 | |
| 3048 | // Make sure any stack arguments overlapping with where we're storing |
| 3049 | // are loaded before this eventual operation. Otherwise they'll be |
| 3050 | // clobbered. |
| 3051 | |
| 3052 | // FIXME: Why is this really necessary? This seems to just result in a |
| 3053 | // lot of code to copy the stack and write them back to the same |
| 3054 | // locations, which are supposed to be immutable? |
| 3055 | Chain = addTokenForArgument(Chain, DAG, MFI, FI); |
| 3056 | } else { |
| 3057 | DstAddr = PtrOff; |
| 3058 | DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); |
| 3059 | Alignment = |
| 3060 | commonAlignment(Subtarget->getStackAlignment(), LocMemOffset); |
| 3061 | } |
| 3062 | |
| 3063 | if (Outs[i].Flags.isByVal()) { |
| 3064 | SDValue SizeNode = |
| 3065 | DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); |
| 3066 | SDValue Cpy = |
| 3067 | DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode, |
| 3068 | Outs[i].Flags.getNonZeroByValAlign(), |
| 3069 | /*isVol = */ false, /*AlwaysInline = */ true, |
| 3070 | /*isTailCall = */ false, DstInfo, |
| 3071 | MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS)); |
| 3072 | |
| 3073 | MemOpChains.push_back(Cpy); |
| 3074 | } else { |
| 3075 | SDValue Store = |
| 3076 | DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Alignment); |
| 3077 | MemOpChains.push_back(Store); |
| 3078 | } |
| 3079 | } |
| 3080 | } |
| 3081 | |
| 3082 | if (!AMDGPUTargetMachine::EnableFixedFunctionABI && |
| 3083 | CallConv != CallingConv::AMDGPU_Gfx) { |
| 3084 | // Copy special input registers after user input arguments. |
| 3085 | passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); |
| 3086 | } |
| 3087 | |
| 3088 | if (!MemOpChains.empty()) |
| 3089 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); |
| 3090 | |
| 3091 | // Build a sequence of copy-to-reg nodes chained together with token chain |
| 3092 | // and flag operands which copy the outgoing args into the appropriate regs. |
| 3093 | SDValue InFlag; |
| 3094 | for (auto &RegToPass : RegsToPass) { |
| 3095 | Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, |
| 3096 | RegToPass.second, InFlag); |
| 3097 | InFlag = Chain.getValue(1); |
| 3098 | } |
| 3099 | |
  SDValue PhysReturnAddrReg;
| 3102 | if (IsTailCall) { |
| 3103 | // Since the return is being combined with the call, we need to pass on the |
| 3104 | // return address. |
| 3105 | |
| 3106 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
| 3107 | SDValue ReturnAddrReg = CreateLiveInRegister( |
| 3108 | DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); |
| 3109 | |
| 3110 | PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), |
| 3111 | MVT::i64); |
    Chain =
        DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
| 3113 | InFlag = Chain.getValue(1); |
| 3114 | } |
| 3115 | |
  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call. However, in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when SP is reset they'll
  // be in the correct location.
| 3120 | if (IsTailCall && !IsSibCall) { |
| 3121 | Chain = DAG.getCALLSEQ_END(Chain, |
| 3122 | DAG.getTargetConstant(NumBytes, DL, MVT::i32), |
| 3123 | DAG.getTargetConstant(0, DL, MVT::i32), |
| 3124 | InFlag, DL); |
| 3125 | InFlag = Chain.getValue(1); |
| 3126 | } |
| 3127 | |
| 3128 | std::vector<SDValue> Ops; |
| 3129 | Ops.push_back(Chain); |
| 3130 | Ops.push_back(Callee); |
| 3131 | // Add a redundant copy of the callee global which will not be legalized, as |
| 3132 | // we need direct access to the callee later. |
| 3133 | if (GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(Callee)) { |
| 3134 | const GlobalValue *GV = GSD->getGlobal(); |
| 3135 | Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64)); |
| 3136 | } else { |
| 3137 | Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64)); |
| 3138 | } |
| 3139 | |
| 3140 | if (IsTailCall) { |
| 3141 | // Each tail call may have to adjust the stack by a different amount, so |
| 3142 | // this information must travel along with the operation for eventual |
| 3143 | // consumption by emitEpilogue. |
| 3144 | Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); |
| 3145 | |
| 3146 | Ops.push_back(PhysReturnAddrReg); |
| 3147 | } |
| 3148 | |
| 3149 | // Add argument registers to the end of the list so that they are known live |
| 3150 | // into the call. |
| 3151 | for (auto &RegToPass : RegsToPass) { |
| 3152 | Ops.push_back(DAG.getRegister(RegToPass.first, |
| 3153 | RegToPass.second.getValueType())); |
| 3154 | } |
| 3155 | |
| 3156 | // Add a register mask operand representing the call-preserved registers. |
| 3157 | |
| 3158 | auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo()); |
| 3159 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); |
  assert(Mask && "Missing call preserved mask for calling convention");
| 3161 | Ops.push_back(DAG.getRegisterMask(Mask)); |
| 3162 | |
| 3163 | if (InFlag.getNode()) |
| 3164 | Ops.push_back(InFlag); |
| 3165 | |
| 3166 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
| 3167 | |
  // If we're doing a tail call, use a TC_RETURN here rather than an
  // actual call instruction.
| 3170 | if (IsTailCall) { |
| 3171 | MFI.setHasTailCall(); |
| 3172 | return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops); |
| 3173 | } |
| 3174 | |
| 3175 | // Returns a chain and a flag for retval copy to use. |
| 3176 | SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops); |
| 3177 | Chain = Call.getValue(0); |
| 3178 | InFlag = Call.getValue(1); |
| 3179 | |
| 3180 | uint64_t CalleePopBytes = NumBytes; |
| 3181 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32), |
| 3182 | DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32), |
| 3183 | InFlag, DL); |
| 3184 | if (!Ins.empty()) |
| 3185 | InFlag = Chain.getValue(1); |
| 3186 | |
| 3187 | // Handle result values, copying them out of physregs into vregs that we |
| 3188 | // return. |
| 3189 | return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, |
| 3190 | InVals, IsThisReturn, |
| 3191 | IsThisReturn ? OutVals[0] : SDValue()); |
| 3192 | } |
| 3193 | |
| 3194 | // This is identical to the default implementation in ExpandDYNAMIC_STACKALLOC, |
| 3195 | // except for applying the wave size scale to the increment amount. |
| 3196 | SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl( |
| 3197 | SDValue Op, SelectionDAG &DAG) const { |
| 3198 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 3199 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 3200 | |
| 3201 | SDLoc dl(Op); |
| 3202 | EVT VT = Op.getValueType(); |
| 3203 | SDValue Tmp1 = Op; |
| 3204 | SDValue Tmp2 = Op.getValue(1); |
| 3205 | SDValue Tmp3 = Op.getOperand(2); |
| 3206 | SDValue Chain = Tmp1.getOperand(0); |
| 3207 | |
| 3208 | Register SPReg = Info->getStackPtrOffsetReg(); |
| 3209 | |
| 3210 | // Chain the dynamic stack allocation so that it doesn't modify the stack |
| 3211 | // pointer when other instructions are using the stack. |
| 3212 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); |
| 3213 | |
| 3214 | SDValue Size = Tmp2.getOperand(1); |
| 3215 | SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); |
| 3216 | Chain = SP.getValue(1); |
| 3217 | MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue(); |
| 3218 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
| 3219 | const TargetFrameLowering *TFL = ST.getFrameLowering(); |
| 3220 | unsigned Opc = |
| 3221 | TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ? |
| 3222 | ISD::ADD : ISD::SUB; |
| 3223 | |
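  // The stack pointer is a per-wave offset into scratch, while alloca sizes
  // are per-lane, so the increment must be scaled up by the wavefront size.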
| 3224 | SDValue ScaledSize = DAG.getNode( |
| 3225 | ISD::SHL, dl, VT, Size, |
| 3226 | DAG.getConstant(ST.getWavefrontSizeLog2(), dl, MVT::i32)); |
| 3227 | |
| 3228 | Align StackAlign = TFL->getStackAlign(); |
| 3229 | Tmp1 = DAG.getNode(Opc, dl, VT, SP, ScaledSize); // Value |
| 3230 | if (Alignment && *Alignment > StackAlign) { |
| 3231 | Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1, |
| 3232 | DAG.getConstant(-(uint64_t)Alignment->value() |
| 3233 | << ST.getWavefrontSizeLog2(), |
| 3234 | dl, VT)); |
| 3235 | } |
| 3236 | |
| 3237 | Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain |
| 3238 | Tmp2 = DAG.getCALLSEQ_END( |
| 3239 | Chain, DAG.getIntPtrConstant(0, dl, true), |
| 3240 | DAG.getIntPtrConstant(0, dl, true), SDValue(), dl); |
| 3241 | |
| 3242 | return DAG.getMergeValues({Tmp1, Tmp2}, dl); |
| 3243 | } |
| 3244 | |
| 3245 | SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, |
| 3246 | SelectionDAG &DAG) const { |
| 3247 | // We only handle constant sizes here to allow non-entry block, static sized |
| 3248 | // allocas. A truly dynamic value is more difficult to support because we |
| 3249 | // don't know if the size value is uniform or not. If the size isn't uniform, |
| 3250 | // we would need to do a wave reduction to get the maximum size to know how |
| 3251 | // much to increment the uniform stack pointer. |
| 3252 | SDValue Size = Op.getOperand(1); |
| 3253 | if (isa<ConstantSDNode>(Size)) |
| 3254 | return lowerDYNAMIC_STACKALLOCImpl(Op, DAG); // Use "generic" expansion. |
| 3255 | |
| 3256 | return AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(Op, DAG); |
| 3257 | } |
| 3258 | |
| 3259 | Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT, |
| 3260 | const MachineFunction &MF) const { |
  Register Reg = StringSwitch<Register>(RegName)
    .Case("m0", AMDGPU::M0)
    .Case("exec", AMDGPU::EXEC)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Default(Register());
| 3270 | |
  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName) + "\"."));
  }
| 3276 | |
| 3277 | if (!Subtarget->hasFlatScrRegister() && |
| 3278 | Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { |
    report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName) + "\" for subtarget."));
| 3281 | } |
| 3282 | |
| 3283 | switch (Reg) { |
| 3284 | case AMDGPU::M0: |
| 3285 | case AMDGPU::EXEC_LO: |
| 3286 | case AMDGPU::EXEC_HI: |
| 3287 | case AMDGPU::FLAT_SCR_LO: |
| 3288 | case AMDGPU::FLAT_SCR_HI: |
| 3289 | if (VT.getSizeInBits() == 32) |
| 3290 | return Reg; |
| 3291 | break; |
| 3292 | case AMDGPU::EXEC: |
| 3293 | case AMDGPU::FLAT_SCR: |
| 3294 | if (VT.getSizeInBits() == 64) |
| 3295 | return Reg; |
| 3296 | break; |
| 3297 | default: |
    llvm_unreachable("missing register type checking");
| 3299 | } |
| 3300 | |
  report_fatal_error(Twine("invalid type for register \""
                           + StringRef(RegName) + "\"."));
| 3303 | } |
| 3304 | |
| 3305 | // If kill is not the last instruction, split the block so kill is always a |
| 3306 | // proper terminator. |
| 3307 | MachineBasicBlock * |
| 3308 | SITargetLowering::splitKillBlock(MachineInstr &MI, |
| 3309 | MachineBasicBlock *BB) const { |
| 3310 | MachineBasicBlock *SplitBB = BB->splitAt(MI, false /*UpdateLiveIns*/); |
| 3311 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 3312 | MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); |
| 3313 | return SplitBB; |
| 3314 | } |
| 3315 | |
// Split block \p MBB at \p MI, so as to insert a loop. If \p InstInLoop is
// true, \p MI will be the only instruction in the loop body block. Otherwise,
// it will be the first instruction in the remainder block.
//
/// \returns { LoopBody, Remainder }
| 3321 | static std::pair<MachineBasicBlock *, MachineBasicBlock *> |
| 3322 | splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) { |
| 3323 | MachineFunction *MF = MBB.getParent(); |
| 3324 | MachineBasicBlock::iterator I(&MI); |
| 3325 | |
| 3326 | // To insert the loop we need to split the block. Move everything after this |
| 3327 | // point to a new block, and insert a new empty block between the two. |
| 3328 | MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); |
| 3329 | MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); |
| 3330 | MachineFunction::iterator MBBI(MBB); |
| 3331 | ++MBBI; |
| 3332 | |
| 3333 | MF->insert(MBBI, LoopBB); |
| 3334 | MF->insert(MBBI, RemainderBB); |
| 3335 | |
| 3336 | LoopBB->addSuccessor(LoopBB); |
| 3337 | LoopBB->addSuccessor(RemainderBB); |
| 3338 | |
| 3339 | // Move the rest of the block into a new block. |
| 3340 | RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); |
| 3341 | |
| 3342 | if (InstInLoop) { |
| 3343 | auto Next = std::next(I); |
| 3344 | |
| 3345 | // Move instruction to loop body. |
| 3346 | LoopBB->splice(LoopBB->begin(), &MBB, I, Next); |
| 3347 | |
| 3348 | // Move the rest of the block. |
| 3349 | RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end()); |
| 3350 | } else { |
| 3351 | RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); |
| 3352 | } |
| 3353 | |
| 3354 | MBB.addSuccessor(LoopBB); |
| 3355 | |
| 3356 | return std::make_pair(LoopBB, RemainderBB); |
| 3357 | } |
| 3358 | |
| 3359 | /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it. |
| 3360 | void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const { |
| 3361 | MachineBasicBlock *MBB = MI.getParent(); |
| 3362 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 3363 | auto I = MI.getIterator(); |
| 3364 | auto E = std::next(I); |
| 3365 | |
| 3366 | BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT)) |
| 3367 | .addImm(0); |
| 3368 | |
| 3369 | MIBundleBuilder Bundler(*MBB, I, E); |
| 3370 | finalizeBundle(*MBB, Bundler.begin()); |
| 3371 | } |
| 3372 | |
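// Emit \p MI inside a loop that clears TRAP_STS.MEM_VIOL, executes the
// operation bundled with an S_WAITCNT, and retries while the memory-violation
// flag is still being raised.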
| 3373 | MachineBasicBlock * |
| 3374 | SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI, |
| 3375 | MachineBasicBlock *BB) const { |
| 3376 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3377 | |
| 3378 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
| 3379 | |
| 3380 | MachineBasicBlock *LoopBB; |
| 3381 | MachineBasicBlock *RemainderBB; |
| 3382 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 3383 | |
| 3384 | // Apparently kill flags are only valid if the def is in the same block? |
| 3385 | if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0)) |
| 3386 | Src->setIsKill(false); |
| 3387 | |
| 3388 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true); |
| 3389 | |
| 3390 | MachineBasicBlock::iterator I = LoopBB->end(); |
| 3391 | |
| 3392 | const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg( |
| 3393 | AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1); |
| 3394 | |
| 3395 | // Clear TRAP_STS.MEM_VIOL |
| 3396 | BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32)) |
| 3397 | .addImm(0) |
| 3398 | .addImm(EncodedReg); |
| 3399 | |
| 3400 | bundleInstWithWaitcnt(MI); |
| 3401 | |
| 3402 | Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
| 3403 | |
| 3404 | // Load and check TRAP_STS.MEM_VIOL |
| 3405 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg) |
| 3406 | .addImm(EncodedReg); |
| 3407 | |
| 3408 | // FIXME: Do we need to use an isel pseudo that may clobber scc? |
| 3409 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32)) |
| 3410 | .addReg(Reg, RegState::Kill) |
| 3411 | .addImm(0); |
| 3412 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) |
| 3413 | .addMBB(LoopBB); |
| 3414 | |
| 3415 | return RemainderBB; |
| 3416 | } |
| 3417 | |
| 3418 | // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the |
| 3419 | // wavefront. If the value is uniform and just happens to be in a VGPR, this |
| 3420 | // will only do one iteration. In the worst case, this will loop 64 times. |
| 3421 | // |
| 3422 | // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. |
| 3423 | static MachineBasicBlock::iterator |
| 3424 | emitLoadM0FromVGPRLoop(const SIInstrInfo *TII, MachineRegisterInfo &MRI, |
| 3425 | MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, |
| 3426 | const DebugLoc &DL, const MachineOperand &Idx, |
| 3427 | unsigned InitReg, unsigned ResultReg, unsigned PhiReg, |
| 3428 | unsigned InitSaveExecReg, int Offset, bool UseGPRIdxMode, |
| 3429 | Register &SGPRIdxReg) { |
| 3430 | |
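  // Rough shape of the emitted waterfall loop: each iteration reads one
  // lane's index with V_READFIRSTLANE_B32, computes the set of lanes sharing
  // that value with V_CMP_EQ_U32, narrows EXEC to that set with
  // S_AND_SAVEEXEC, performs the access, then removes the handled lanes from
  // EXEC and branches back while any remain (S_CBRANCH_EXECNZ).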
| 3431 | MachineFunction *MF = OrigBB.getParent(); |
| 3432 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
| 3433 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
| 3434 | MachineBasicBlock::iterator I = LoopBB.begin(); |
| 3435 | |
| 3436 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); |
| 3437 | Register PhiExec = MRI.createVirtualRegister(BoolRC); |
| 3438 | Register NewExec = MRI.createVirtualRegister(BoolRC); |
| 3439 | Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
| 3440 | Register CondReg = MRI.createVirtualRegister(BoolRC); |
| 3441 | |
| 3442 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) |
| 3443 | .addReg(InitReg) |
| 3444 | .addMBB(&OrigBB) |
| 3445 | .addReg(ResultReg) |
| 3446 | .addMBB(&LoopBB); |
| 3447 | |
| 3448 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) |
| 3449 | .addReg(InitSaveExecReg) |
| 3450 | .addMBB(&OrigBB) |
| 3451 | .addReg(NewExec) |
| 3452 | .addMBB(&LoopBB); |
| 3453 | |
  // Read the next variant; this is also the loop branch target.
| 3455 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) |
| 3456 | .addReg(Idx.getReg(), getUndefRegState(Idx.isUndef())); |
| 3457 | |
| 3458 | // Compare the just read M0 value to all possible Idx values. |
| 3459 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) |
| 3460 | .addReg(CurrentIdxReg) |
| 3461 | .addReg(Idx.getReg(), 0, Idx.getSubReg()); |
| 3462 | |
  // Update EXEC, saving the original EXEC value into NewExec.
| 3464 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 |
| 3465 | : AMDGPU::S_AND_SAVEEXEC_B64), |
| 3466 | NewExec) |
| 3467 | .addReg(CondReg, RegState::Kill); |
| 3468 | |
| 3469 | MRI.setSimpleHint(NewExec, CondReg); |
| 3470 | |
| 3471 | if (UseGPRIdxMode) { |
| 3472 | if (Offset == 0) { |
| 3473 | SGPRIdxReg = CurrentIdxReg; |
| 3474 | } else { |
| 3475 | SGPRIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
| 3476 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SGPRIdxReg) |
| 3477 | .addReg(CurrentIdxReg, RegState::Kill) |
| 3478 | .addImm(Offset); |
| 3479 | } |
| 3480 | } else { |
| 3481 | // Move index from VCC into M0 |
| 3482 | if (Offset == 0) { |
| 3483 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
| 3484 | .addReg(CurrentIdxReg, RegState::Kill); |
| 3485 | } else { |
| 3486 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) |
| 3487 | .addReg(CurrentIdxReg, RegState::Kill) |
| 3488 | .addImm(Offset); |
| 3489 | } |
| 3490 | } |
| 3491 | |
| 3492 | // Update EXEC, switch all done bits to 0 and all todo bits to 1. |
| 3493 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; |
| 3494 | MachineInstr *InsertPt = |
| 3495 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term |
| 3496 | : AMDGPU::S_XOR_B64_term), Exec) |
| 3497 | .addReg(Exec) |
| 3498 | .addReg(NewExec); |
| 3499 | |
| 3500 | // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use |
| 3501 | // s_cbranch_scc0? |
| 3502 | |
| 3503 | // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. |
| 3504 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) |
| 3505 | .addMBB(&LoopBB); |
| 3506 | |
| 3507 | return InsertPt->getIterator(); |
| 3508 | } |
| 3509 | |
// This has slightly sub-optimal regalloc when the source vector is killed by
// the read. The register allocator does not understand that the kill is
// per-workitem, so the value is kept alive for the whole loop and we end up
// not re-using a subregister from it, using 1 more VGPR than necessary. This
// was avoided when this was expanded after register allocation.
| 3515 | static MachineBasicBlock::iterator |
| 3516 | loadM0FromVGPR(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineInstr &MI, |
| 3517 | unsigned InitResultReg, unsigned PhiReg, int Offset, |
| 3518 | bool UseGPRIdxMode, Register &SGPRIdxReg) { |
| 3519 | MachineFunction *MF = MBB.getParent(); |
| 3520 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
| 3521 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
| 3522 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 3523 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3524 | MachineBasicBlock::iterator I(&MI); |
| 3525 | |
| 3526 | const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); |
| 3527 | Register DstReg = MI.getOperand(0).getReg(); |
| 3528 | Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); |
| 3529 | Register TmpExec = MRI.createVirtualRegister(BoolXExecRC); |
| 3530 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; |
| 3531 | unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; |
| 3532 | |
| 3533 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); |
| 3534 | |
| 3535 | // Save the EXEC mask |
| 3536 | BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec) |
| 3537 | .addReg(Exec); |
| 3538 | |
| 3539 | MachineBasicBlock *LoopBB; |
| 3540 | MachineBasicBlock *RemainderBB; |
| 3541 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false); |
| 3542 | |
| 3543 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
| 3544 | |
| 3545 | auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, |
| 3546 | InitResultReg, DstReg, PhiReg, TmpExec, |
| 3547 | Offset, UseGPRIdxMode, SGPRIdxReg); |
| 3548 | |
  MachineBasicBlock *LandingPad = MF->CreateMachineBasicBlock();
| 3550 | MachineFunction::iterator MBBI(LoopBB); |
| 3551 | ++MBBI; |
| 3552 | MF->insert(MBBI, LandingPad); |
| 3553 | LoopBB->removeSuccessor(RemainderBB); |
| 3554 | LandingPad->addSuccessor(RemainderBB); |
| 3555 | LoopBB->addSuccessor(LandingPad); |
| 3556 | MachineBasicBlock::iterator First = LandingPad->begin(); |
| 3557 | BuildMI(*LandingPad, First, DL, TII->get(MovExecOpc), Exec) |
| 3558 | .addReg(SaveExec); |
| 3559 | |
| 3560 | return InsPt; |
| 3561 | } |
| 3562 | |
// Returns the subregister index to use and the remaining dynamic offset. An
// in-bounds constant offset is folded entirely into the subregister index
// (e.g. an offset of 2 yields (sub2, 0)); an out-of-bounds offset is returned
// unchanged as (sub0, Offset).
| 3564 | static std::pair<unsigned, int> |
| 3565 | computeIndirectRegAndOffset(const SIRegisterInfo &TRI, |
| 3566 | const TargetRegisterClass *SuperRC, |
| 3567 | unsigned VecReg, |
| 3568 | int Offset) { |
| 3569 | int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; |
| 3570 | |
  // Skip out-of-bounds offsets, or else we would end up using an undefined
  // register.
| 3573 | if (Offset >= NumElts || Offset < 0) |
| 3574 | return std::make_pair(AMDGPU::sub0, Offset); |
| 3575 | |
| 3576 | return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0); |
| 3577 | } |
| 3578 | |
| 3579 | static void setM0ToIndexFromSGPR(const SIInstrInfo *TII, |
| 3580 | MachineRegisterInfo &MRI, MachineInstr &MI, |
| 3581 | int Offset) { |
| 3582 | MachineBasicBlock *MBB = MI.getParent(); |
| 3583 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3584 | MachineBasicBlock::iterator I(&MI); |
| 3585 | |
| 3586 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
| 3587 | |
| 3588 | assert(Idx->getReg() != AMDGPU::NoRegister); |
| 3589 | |
| 3590 | if (Offset == 0) { |
| 3591 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx); |
| 3592 | } else { |
| 3593 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) |
| 3594 | .add(*Idx) |
| 3595 | .addImm(Offset); |
| 3596 | } |
| 3597 | } |
| 3598 | |
| 3599 | static Register getIndirectSGPRIdx(const SIInstrInfo *TII, |
| 3600 | MachineRegisterInfo &MRI, MachineInstr &MI, |
| 3601 | int Offset) { |
| 3602 | MachineBasicBlock *MBB = MI.getParent(); |
| 3603 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3604 | MachineBasicBlock::iterator I(&MI); |
| 3605 | |
| 3606 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
| 3607 | |
| 3608 | if (Offset == 0) |
| 3609 | return Idx->getReg(); |
| 3610 | |
| 3611 | Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
| 3612 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) |
| 3613 | .add(*Idx) |
| 3614 | .addImm(Offset); |
| 3615 | return Tmp; |
| 3616 | } |
| 3617 | |
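// Lower a dynamic vector-element read. A uniform (SGPR) index becomes a
// single indexed move, while a divergent (VGPR) index requires a waterfall
// loop over the unique index values in the wave.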
| 3618 | static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, |
| 3619 | MachineBasicBlock &MBB, |
| 3620 | const GCNSubtarget &ST) { |
| 3621 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 3622 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
| 3623 | MachineFunction *MF = MBB.getParent(); |
| 3624 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 3625 | |
| 3626 | Register Dst = MI.getOperand(0).getReg(); |
| 3627 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
| 3628 | Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); |
| 3629 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); |
| 3630 | |
| 3631 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); |
| 3632 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); |
| 3633 | |
| 3634 | unsigned SubReg; |
| 3635 | std::tie(SubReg, Offset) |
| 3636 | = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); |
| 3637 | |
| 3638 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); |
| 3639 | |
  // Check for an SGPR index.
| 3641 | if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { |
| 3642 | MachineBasicBlock::iterator I(&MI); |
| 3643 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3644 | |
| 3645 | if (UseGPRIdxMode) { |
| 3646 | // TODO: Look at the uses to avoid the copy. This may require rescheduling |
| 3647 | // to avoid interfering with other uses, so probably requires a new |
| 3648 | // optimization pass. |
| 3649 | Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); |
| 3650 | |
| 3651 | const MCInstrDesc &GPRIDXDesc = |
| 3652 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true); |
| 3653 | BuildMI(MBB, I, DL, GPRIDXDesc, Dst) |
| 3654 | .addReg(SrcReg) |
| 3655 | .addReg(Idx) |
| 3656 | .addImm(SubReg); |
| 3657 | } else { |
| 3658 | setM0ToIndexFromSGPR(TII, MRI, MI, Offset); |
| 3659 | |
| 3660 | BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) |
| 3661 | .addReg(SrcReg, 0, SubReg) |
| 3662 | .addReg(SrcReg, RegState::Implicit); |
| 3663 | } |
| 3664 | |
| 3665 | MI.eraseFromParent(); |
| 3666 | |
| 3667 | return &MBB; |
| 3668 | } |
| 3669 | |
| 3670 | // Control flow needs to be inserted if indexing with a VGPR. |
| 3671 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3672 | MachineBasicBlock::iterator I(&MI); |
| 3673 | |
| 3674 | Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 3675 | Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 3676 | |
| 3677 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); |
| 3678 | |
| 3679 | Register SGPRIdxReg; |
| 3680 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, |
| 3681 | UseGPRIdxMode, SGPRIdxReg); |
| 3682 | |
| 3683 | MachineBasicBlock *LoopBB = InsPt->getParent(); |
| 3684 | |
| 3685 | if (UseGPRIdxMode) { |
| 3686 | const MCInstrDesc &GPRIDXDesc = |
| 3687 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true); |
| 3688 | |
| 3689 | BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) |
| 3690 | .addReg(SrcReg) |
| 3691 | .addReg(SGPRIdxReg) |
| 3692 | .addImm(SubReg); |
| 3693 | } else { |
| 3694 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) |
| 3695 | .addReg(SrcReg, 0, SubReg) |
| 3696 | .addReg(SrcReg, RegState::Implicit); |
| 3697 | } |
| 3698 | |
| 3699 | MI.eraseFromParent(); |
| 3700 | |
| 3701 | return LoopBB; |
| 3702 | } |
| 3703 | |
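// Lower a dynamic vector-element write. As with the read case, an SGPR index
// is handled with a single indexed move, while a VGPR index requires a
// waterfall loop.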
| 3704 | static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, |
| 3705 | MachineBasicBlock &MBB, |
| 3706 | const GCNSubtarget &ST) { |
| 3707 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 3708 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
| 3709 | MachineFunction *MF = MBB.getParent(); |
| 3710 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 3711 | |
| 3712 | Register Dst = MI.getOperand(0).getReg(); |
| 3713 | const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); |
| 3714 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
| 3715 | const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); |
| 3716 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); |
| 3717 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); |
| 3718 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); |
| 3719 | |
| 3720 | // This can be an immediate, but will be folded later. |
| 3721 | assert(Val->getReg()); |
| 3722 | |
| 3723 | unsigned SubReg; |
| 3724 | std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, |
| 3725 | SrcVec->getReg(), |
| 3726 | Offset); |
| 3727 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); |
| 3728 | |
| 3729 | if (Idx->getReg() == AMDGPU::NoRegister) { |
| 3730 | MachineBasicBlock::iterator I(&MI); |
| 3731 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3732 | |
| 3733 | assert(Offset == 0); |
| 3734 | |
| 3735 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) |
| 3736 | .add(*SrcVec) |
| 3737 | .add(*Val) |
| 3738 | .addImm(SubReg); |
| 3739 | |
| 3740 | MI.eraseFromParent(); |
| 3741 | return &MBB; |
| 3742 | } |
| 3743 | |
  // Check for an SGPR index.
| 3745 | if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { |
| 3746 | MachineBasicBlock::iterator I(&MI); |
| 3747 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3748 | |
| 3749 | if (UseGPRIdxMode) { |
| 3750 | Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); |
| 3751 | |
| 3752 | const MCInstrDesc &GPRIDXDesc = |
| 3753 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); |
| 3754 | BuildMI(MBB, I, DL, GPRIDXDesc, Dst) |
| 3755 | .addReg(SrcVec->getReg()) |
| 3756 | .add(*Val) |
| 3757 | .addReg(Idx) |
| 3758 | .addImm(SubReg); |
| 3759 | } else { |
| 3760 | setM0ToIndexFromSGPR(TII, MRI, MI, Offset); |
| 3761 | |
| 3762 | const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( |
| 3763 | TRI.getRegSizeInBits(*VecRC), 32, false); |
| 3764 | BuildMI(MBB, I, DL, MovRelDesc, Dst) |
| 3765 | .addReg(SrcVec->getReg()) |
| 3766 | .add(*Val) |
| 3767 | .addImm(SubReg); |
| 3768 | } |
| 3769 | MI.eraseFromParent(); |
| 3770 | return &MBB; |
| 3771 | } |
| 3772 | |
| 3773 | // Control flow needs to be inserted if indexing with a VGPR. |
| 3774 | if (Val->isReg()) |
| 3775 | MRI.clearKillFlags(Val->getReg()); |
| 3776 | |
| 3777 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3778 | |
| 3779 | Register PhiReg = MRI.createVirtualRegister(VecRC); |
| 3780 | |
| 3781 | Register SGPRIdxReg; |
| 3782 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, Offset, |
| 3783 | UseGPRIdxMode, SGPRIdxReg); |
| 3784 | MachineBasicBlock *LoopBB = InsPt->getParent(); |
| 3785 | |
| 3786 | if (UseGPRIdxMode) { |
| 3787 | const MCInstrDesc &GPRIDXDesc = |
| 3788 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); |
| 3789 | |
| 3790 | BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) |
| 3791 | .addReg(PhiReg) |
| 3792 | .add(*Val) |
| 3793 | .addReg(SGPRIdxReg) |
| 3794 | .addImm(AMDGPU::sub0); |
| 3795 | } else { |
| 3796 | const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( |
| 3797 | TRI.getRegSizeInBits(*VecRC), 32, false); |
| 3798 | BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst) |
| 3799 | .addReg(PhiReg) |
| 3800 | .add(*Val) |
| 3801 | .addImm(AMDGPU::sub0); |
| 3802 | } |
| 3803 | |
| 3804 | MI.eraseFromParent(); |
| 3805 | return LoopBB; |
| 3806 | } |
| 3807 | |
| 3808 | MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( |
| 3809 | MachineInstr &MI, MachineBasicBlock *BB) const { |
| 3810 | |
| 3811 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 3812 | MachineFunction *MF = BB->getParent(); |
| 3813 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
| 3814 | |
| 3815 | switch (MI.getOpcode()) { |
| 3816 | case AMDGPU::S_UADDO_PSEUDO: |
| 3817 | case AMDGPU::S_USUBO_PSEUDO: { |
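    // Expand to a scalar add/sub that sets SCC, then materialize the
    // overflow bit from SCC with S_CSELECT_B64 (1 on overflow, 0 otherwise).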
| 3818 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3819 | MachineOperand &Dest0 = MI.getOperand(0); |
| 3820 | MachineOperand &Dest1 = MI.getOperand(1); |
| 3821 | MachineOperand &Src0 = MI.getOperand(2); |
| 3822 | MachineOperand &Src1 = MI.getOperand(3); |
| 3823 | |
| 3824 | unsigned Opc = (MI.getOpcode() == AMDGPU::S_UADDO_PSEUDO) |
| 3825 | ? AMDGPU::S_ADD_I32 |
| 3826 | : AMDGPU::S_SUB_I32; |
| 3827 | BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1); |
| 3828 | |
| 3829 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CSELECT_B64), Dest1.getReg()) |
| 3830 | .addImm(1) |
| 3831 | .addImm(0); |
| 3832 | |
| 3833 | MI.eraseFromParent(); |
| 3834 | return BB; |
| 3835 | } |
| 3836 | case AMDGPU::S_ADD_U64_PSEUDO: |
| 3837 | case AMDGPU::S_SUB_U64_PSEUDO: { |
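    // Expand into 32-bit halves, chaining the carry through SCC:
    // S_ADD_U32/S_SUB_U32 produces the low half and sets SCC, which
    // S_ADDC_U32/S_SUBB_U32 then consumes for the high half.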
| 3838 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
| 3839 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
| 3840 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
| 3841 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); |
| 3842 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3843 | |
| 3844 | MachineOperand &Dest = MI.getOperand(0); |
| 3845 | MachineOperand &Src0 = MI.getOperand(1); |
| 3846 | MachineOperand &Src1 = MI.getOperand(2); |
| 3847 | |
| 3848 | Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| 3849 | Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| 3850 | |
| 3851 | MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm( |
| 3852 | MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); |
| 3853 | MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm( |
| 3854 | MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); |
| 3855 | |
| 3856 | MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm( |
| 3857 | MI, MRI, Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); |
| 3858 | MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm( |
| 3859 | MI, MRI, Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); |
| 3860 | |
| 3861 | bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); |
| 3862 | |
| 3863 | unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; |
| 3864 | unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; |
| 3865 | BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0).add(Src0Sub0).add(Src1Sub0); |
| 3866 | BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1).add(Src0Sub1).add(Src1Sub1); |
| 3867 | BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) |
| 3868 | .addReg(DestSub0) |
| 3869 | .addImm(AMDGPU::sub0) |
| 3870 | .addReg(DestSub1) |
| 3871 | .addImm(AMDGPU::sub1); |
| 3872 | MI.eraseFromParent(); |
| 3873 | return BB; |
| 3874 | } |
| 3875 | case AMDGPU::V_ADD_U64_PSEUDO: |
| 3876 | case AMDGPU::V_SUB_U64_PSEUDO: { |
| 3877 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
| 3878 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
| 3879 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
| 3880 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3881 | |
| 3882 | bool IsAdd = (MI.getOpcode() == AMDGPU::V_ADD_U64_PSEUDO); |
| 3883 | |
| 3884 | const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); |
| 3885 | |
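// Expand into two 32-bit VALU halves; the low half defines a carry in
// CarryReg that the high half consumes.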
| 3886 | Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 3887 | Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 3888 | |
| 3889 | Register CarryReg = MRI.createVirtualRegister(CarryRC); |
| 3890 | Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); |
| 3891 | |
| 3892 | MachineOperand &Dest = MI.getOperand(0); |
| 3893 | MachineOperand &Src0 = MI.getOperand(1); |
| 3894 | MachineOperand &Src1 = MI.getOperand(2); |
| 3895 | |
| 3896 | const TargetRegisterClass *Src0RC = Src0.isReg() |
| 3897 | ? MRI.getRegClass(Src0.getReg()) |
| 3898 | : &AMDGPU::VReg_64RegClass; |
| 3899 | const TargetRegisterClass *Src1RC = Src1.isReg() |
| 3900 | ? MRI.getRegClass(Src1.getReg()) |
| 3901 | : &AMDGPU::VReg_64RegClass; |
| 3902 | |
| 3903 | const TargetRegisterClass *Src0SubRC = |
| 3904 | TRI->getSubRegClass(Src0RC, AMDGPU::sub0); |
| 3905 | const TargetRegisterClass *Src1SubRC = |
| 3906 | TRI->getSubRegClass(Src1RC, AMDGPU::sub1); |
| 3907 | |
| 3908 | MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm( |
| 3909 | MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); |
| 3910 | MachineOperand SrcReg1Sub0 = TII->buildExtractSubRegOrImm( |
| 3911 | MI, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC); |
| 3912 | |
| 3913 | MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm( |
| 3914 | MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); |
| 3915 | MachineOperand SrcReg1Sub1 = TII->buildExtractSubRegOrImm( |
| 3916 | MI, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC); |
| 3917 | |
| 3918 | unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; |
| 3919 | MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) |
| 3920 | .addReg(CarryReg, RegState::Define) |
| 3921 | .add(SrcReg0Sub0) |
| 3922 | .add(SrcReg1Sub0) |
| 3923 | .addImm(0); // clamp bit |
| 3924 | |
| 3925 | unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; |
| 3926 | MachineInstr *HiHalf = |
| 3927 | BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) |
| 3928 | .addReg(DeadCarryReg, RegState::Define | RegState::Dead) |
| 3929 | .add(SrcReg0Sub1) |
| 3930 | .add(SrcReg1Sub1) |
| 3931 | .addReg(CarryReg, RegState::Kill) |
| 3932 | .addImm(0); // clamp bit |
| 3933 | |
| 3934 | BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) |
| 3935 | .addReg(DestSub0) |
| 3936 | .addImm(AMDGPU::sub0) |
| 3937 | .addReg(DestSub1) |
| 3938 | .addImm(AMDGPU::sub1); |
| 3939 | TII->legalizeOperands(*LoHalf); |
| 3940 | TII->legalizeOperands(*HiHalf); |
| 3941 | MI.eraseFromParent(); |
| 3942 | return BB; |
| 3943 | } |
| 3944 | case AMDGPU::S_ADD_CO_PSEUDO: |
| 3945 | case AMDGPU::S_SUB_CO_PSEUDO: { |
// This pseudo can only be selected from a uniform add/subcarry node, so all
// of its VGPR operands are assumed to be splat vectors.
| 3949 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
| 3950 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
| 3951 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
| 3952 | MachineBasicBlock::iterator MII = MI; |
| 3953 | const DebugLoc &DL = MI.getDebugLoc(); |
| 3954 | MachineOperand &Dest = MI.getOperand(0); |
| 3955 | MachineOperand &CarryDest = MI.getOperand(1); |
| 3956 | MachineOperand &Src0 = MI.getOperand(2); |
| 3957 | MachineOperand &Src1 = MI.getOperand(3); |
| 3958 | MachineOperand &Src2 = MI.getOperand(4); |
| 3959 | unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) |
| 3960 | ? AMDGPU::S_ADDC_U32 |
| 3961 | : AMDGPU::S_SUBB_U32; |
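// Since any VGPR operands are splat (see above), V_READFIRSTLANE yields the
// uniform value, allowing the scalar add/sub with carry to be used.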
| 3962 | if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) { |
| 3963 | Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| 3964 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0) |
| 3965 | .addReg(Src0.getReg()); |
| 3966 | Src0.setReg(RegOp0); |
| 3967 | } |
| 3968 | if (Src1.isReg() && TRI->isVectorRegister(MRI, Src1.getReg())) { |
| 3969 | Register RegOp1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| 3970 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp1) |
| 3971 | .addReg(Src1.getReg()); |
| 3972 | Src1.setReg(RegOp1); |
| 3973 | } |
| 3974 | Register RegOp2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| 3975 | if (TRI->isVectorRegister(MRI, Src2.getReg())) { |
| 3976 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp2) |
| 3977 | .addReg(Src2.getReg()); |
| 3978 | Src2.setReg(RegOp2); |
| 3979 | } |
| 3980 | |
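// Set SCC from the carry-in by comparing Src2 against zero, so the
// S_ADDC/S_SUBB emitted below consumes it.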
| 3981 | const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg()); |
| 3982 | if (TRI->getRegSizeInBits(*Src2RC) == 64) { |
| 3983 | if (ST.hasScalarCompareEq64()) { |
| 3984 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64)) |
| 3985 | .addReg(Src2.getReg()) |
| 3986 | .addImm(0); |
| 3987 | } else { |
| 3988 | const TargetRegisterClass *SubRC = |
| 3989 | TRI->getSubRegClass(Src2RC, AMDGPU::sub0); |
| 3990 | MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm( |
| 3991 | MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC); |
| 3992 | MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm( |
| 3993 | MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC); |
| 3994 | Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| 3995 | |
| 3996 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32) |
| 3997 | .add(Src2Sub0) |
| 3998 | .add(Src2Sub1); |
| 3999 | |
| 4000 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32)) |
| 4001 | .addReg(Src2_32, RegState::Kill) |
| 4002 | .addImm(0); |
| 4003 | } |
| 4004 | } else { |
| 4005 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMPK_LG_U32)) |
| 4006 | .addReg(Src2.getReg()) |
| 4007 | .addImm(0); |
| 4008 | } |
| 4009 | |
| 4010 | BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1); |
| 4011 | |
| 4012 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::COPY), CarryDest.getReg()) |
| 4013 | .addReg(AMDGPU::SCC); |
| 4014 | MI.eraseFromParent(); |
| 4015 | return BB; |
| 4016 | } |
| 4017 | case AMDGPU::SI_INIT_M0: { |
| 4018 | BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), |
| 4019 | TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
| 4020 | .add(MI.getOperand(0)); |
| 4021 | MI.eraseFromParent(); |
| 4022 | return BB; |
| 4023 | } |
| 4024 | case AMDGPU::GET_GROUPSTATICSIZE: { |
| 4025 | assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA || |
| 4026 | getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL); |
| 4027 | DebugLoc DL = MI.getDebugLoc(); |
| 4028 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) |
| 4029 | .add(MI.getOperand(0)) |
| 4030 | .addImm(MFI->getLDSSize()); |
| 4031 | MI.eraseFromParent(); |
| 4032 | return BB; |
| 4033 | } |
| 4034 | case AMDGPU::SI_INDIRECT_SRC_V1: |
| 4035 | case AMDGPU::SI_INDIRECT_SRC_V2: |
| 4036 | case AMDGPU::SI_INDIRECT_SRC_V4: |
| 4037 | case AMDGPU::SI_INDIRECT_SRC_V8: |
| 4038 | case AMDGPU::SI_INDIRECT_SRC_V16: |
| 4039 | case AMDGPU::SI_INDIRECT_SRC_V32: |
| 4040 | return emitIndirectSrc(MI, *BB, *getSubtarget()); |
| 4041 | case AMDGPU::SI_INDIRECT_DST_V1: |
| 4042 | case AMDGPU::SI_INDIRECT_DST_V2: |
| 4043 | case AMDGPU::SI_INDIRECT_DST_V4: |
| 4044 | case AMDGPU::SI_INDIRECT_DST_V8: |
| 4045 | case AMDGPU::SI_INDIRECT_DST_V16: |
| 4046 | case AMDGPU::SI_INDIRECT_DST_V32: |
| 4047 | return emitIndirectDst(MI, *BB, *getSubtarget()); |
| 4048 | case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: |
| 4049 | case AMDGPU::SI_KILL_I1_PSEUDO: |
| 4050 | return splitKillBlock(MI, BB); |
| 4051 | case AMDGPU::V_CNDMASK_B64_PSEUDO: { |
| 4052 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
| 4053 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
| 4054 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
| 4055 | |
| 4056 | Register Dst = MI.getOperand(0).getReg(); |
| 4057 | Register Src0 = MI.getOperand(1).getReg(); |
| 4058 | Register Src1 = MI.getOperand(2).getReg(); |
| 4059 | const DebugLoc &DL = MI.getDebugLoc(); |
| 4060 | Register SrcCond = MI.getOperand(3).getReg(); |
| 4061 | |
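// Expand the 64-bit select into two 32-bit V_CNDMASK_B32 instructions over
// the sub0/sub1 halves, sharing a single copy of the condition mask.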
| 4062 | Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 4063 | Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 4064 | const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); |
| 4065 | Register SrcCondCopy = MRI.createVirtualRegister(CondRC); |
| 4066 | |
| 4067 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) |
| 4068 | .addReg(SrcCond); |
| 4069 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) |
| 4070 | .addImm(0) |
| 4071 | .addReg(Src0, 0, AMDGPU::sub0) |
| 4072 | .addImm(0) |
| 4073 | .addReg(Src1, 0, AMDGPU::sub0) |
| 4074 | .addReg(SrcCondCopy); |
| 4075 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) |
| 4076 | .addImm(0) |
| 4077 | .addReg(Src0, 0, AMDGPU::sub1) |
| 4078 | .addImm(0) |
| 4079 | .addReg(Src1, 0, AMDGPU::sub1) |
| 4080 | .addReg(SrcCondCopy); |
| 4081 | |
| 4082 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) |
| 4083 | .addReg(DstLo) |
| 4084 | .addImm(AMDGPU::sub0) |
| 4085 | .addReg(DstHi) |
| 4086 | .addImm(AMDGPU::sub1); |
| 4087 | MI.eraseFromParent(); |
| 4088 | return BB; |
| 4089 | } |
| 4090 | case AMDGPU::SI_BR_UNDEF: { |
| 4091 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 4092 | const DebugLoc &DL = MI.getDebugLoc(); |
| 4093 | MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) |
| 4094 | .add(MI.getOperand(0)); |
| 4095 | Br->getOperand(1).setIsUndef(true); // read undef SCC |
| 4096 | MI.eraseFromParent(); |
| 4097 | return BB; |
| 4098 | } |
| 4099 | case AMDGPU::ADJCALLSTACKUP: |
| 4100 | case AMDGPU::ADJCALLSTACKDOWN: { |
| 4101 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); |
| 4102 | MachineInstrBuilder MIB(*MF, &MI); |
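// The stack adjustment clobbers and reads the stack pointer; make that
// explicit with implicit-def and implicit-use operands.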
| 4103 | MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) |
| 4104 | .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit); |
| 4105 | return BB; |
| 4106 | } |
| 4107 | case AMDGPU::SI_CALL_ISEL: { |
| 4108 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 4109 | const DebugLoc &DL = MI.getDebugLoc(); |
| 4110 | |
| 4111 | unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); |
| 4112 | |
| 4113 | MachineInstrBuilder MIB; |
| 4114 | MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg); |
| 4115 | |
| 4116 | for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) |
| 4117 | MIB.add(MI.getOperand(I)); |
| 4118 | |
| 4119 | MIB.cloneMemRefs(MI); |
| 4120 | MI.eraseFromParent(); |
| 4121 | return BB; |
| 4122 | } |
| 4123 | case AMDGPU::V_ADD_CO_U32_e32: |
| 4124 | case AMDGPU::V_SUB_CO_U32_e32: |
| 4125 | case AMDGPU::V_SUBREV_CO_U32_e32: { |
| 4126 | // TODO: Define distinct V_*_I32_Pseudo instructions instead. |
| 4127 | const DebugLoc &DL = MI.getDebugLoc(); |
| 4128 | unsigned Opc = MI.getOpcode(); |
| 4129 | |
| 4130 | bool NeedClampOperand = false; |
| 4131 | if (TII->pseudoToMCOpcode(Opc) == -1) { |
| 4132 | Opc = AMDGPU::getVOPe64(Opc); |
| 4133 | NeedClampOperand = true; |
| 4134 | } |
| 4135 | |
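// Rebuild the instruction; the VOP3 (e64) form takes an explicit carry-out
// operand, for which VCC is used to match the e32 form's implicit def.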
| 4136 | auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg()); |
| 4137 | if (TII->isVOP3(*I)) { |
| 4138 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
| 4139 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
| 4140 | I.addReg(TRI->getVCC(), RegState::Define); |
| 4141 | } |
| 4142 | I.add(MI.getOperand(1)) |
| 4143 | .add(MI.getOperand(2)); |
| 4144 | if (NeedClampOperand) |
| 4145 | I.addImm(0); // clamp bit for e64 encoding |
| 4146 | |
| 4147 | TII->legalizeOperands(*I); |
| 4148 | |
| 4149 | MI.eraseFromParent(); |
| 4150 | return BB; |
| 4151 | } |
| 4152 | case AMDGPU::DS_GWS_INIT: |
| 4153 | case AMDGPU::DS_GWS_SEMA_V: |
| 4154 | case AMDGPU::DS_GWS_SEMA_BR: |
| 4155 | case AMDGPU::DS_GWS_SEMA_P: |
| 4156 | case AMDGPU::DS_GWS_SEMA_RELEASE_ALL: |
| 4157 | case AMDGPU::DS_GWS_BARRIER: |
// An s_waitcnt 0 is required to be the instruction immediately following.
| 4159 | if (getSubtarget()->hasGWSAutoReplay()) { |
| 4160 | bundleInstWithWaitcnt(MI); |
| 4161 | return BB; |
| 4162 | } |
| 4163 | |
| 4164 | return emitGWSMemViolTestLoop(MI, BB); |
| 4165 | case AMDGPU::S_SETREG_B32: { |
| 4166 | // Try to optimize cases that only set the denormal mode or rounding mode. |
| 4167 | // |
| 4168 | // If the s_setreg_b32 fully sets all of the bits in the rounding mode or |
| 4169 | // denormal mode to a constant, we can use s_round_mode or s_denorm_mode |
| 4170 | // instead. |
| 4171 | // |
// FIXME: This could be predicated on the immediate, but tablegen doesn't
// allow a no-side-effect instruction in the output of a side-effecting
// pattern.
| 4175 | unsigned ID, Offset, Width; |
| 4176 | AMDGPU::Hwreg::decodeHwreg(MI.getOperand(1).getImm(), ID, Offset, Width); |
| 4177 | if (ID != AMDGPU::Hwreg::ID_MODE) |
| 4178 | return BB; |
| 4179 | |
| 4180 | const unsigned WidthMask = maskTrailingOnes<unsigned>(Width); |
| 4181 | const unsigned SetMask = WidthMask << Offset; |
| 4182 | |
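// The FP round bits live in MODE[3:0] and the FP denorm bits in MODE[7:4],
// which is why the immediates below are masked with 0xf and shifted by 4.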
| 4183 | if (getSubtarget()->hasDenormModeInst()) { |
| 4184 | unsigned SetDenormOp = 0; |
| 4185 | unsigned SetRoundOp = 0; |
| 4186 | |
| 4187 | // The dedicated instructions can only set the whole denorm or round mode |
| 4188 | // at once, not a subset of bits in either. |
| 4189 | if (SetMask == |
| 4190 | (AMDGPU::Hwreg::FP_ROUND_MASK | AMDGPU::Hwreg::FP_DENORM_MASK)) { |
| 4191 | // If this fully sets both the round and denorm mode, emit the two |
| 4192 | // dedicated instructions for these. |
| 4193 | SetRoundOp = AMDGPU::S_ROUND_MODE; |
| 4194 | SetDenormOp = AMDGPU::S_DENORM_MODE; |
| 4195 | } else if (SetMask == AMDGPU::Hwreg::FP_ROUND_MASK) { |
| 4196 | SetRoundOp = AMDGPU::S_ROUND_MODE; |
| 4197 | } else if (SetMask == AMDGPU::Hwreg::FP_DENORM_MASK) { |
| 4198 | SetDenormOp = AMDGPU::S_DENORM_MODE; |
| 4199 | } |
| 4200 | |
| 4201 | if (SetRoundOp || SetDenormOp) { |
| 4202 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
| 4203 | MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg()); |
| 4204 | if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) { |
| 4205 | unsigned ImmVal = Def->getOperand(1).getImm(); |
| 4206 | if (SetRoundOp) { |
| 4207 | BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetRoundOp)) |
| 4208 | .addImm(ImmVal & 0xf); |
| 4209 | |
| 4210 | // If we also have the denorm mode, get just the denorm mode bits. |
| 4211 | ImmVal >>= 4; |
| 4212 | } |
| 4213 | |
| 4214 | if (SetDenormOp) { |
| 4215 | BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetDenormOp)) |
| 4216 | .addImm(ImmVal & 0xf); |
| 4217 | } |
| 4218 | |
| 4219 | MI.eraseFromParent(); |
| 4220 | return BB; |
| 4221 | } |
| 4222 | } |
| 4223 | } |
| 4224 | |
// If only FP bits are touched, use the no-side-effects pseudo.
| 4226 | if ((SetMask & (AMDGPU::Hwreg::FP_ROUND_MASK | |
| 4227 | AMDGPU::Hwreg::FP_DENORM_MASK)) == SetMask) |
| 4228 | MI.setDesc(TII->get(AMDGPU::S_SETREG_B32_mode)); |
| 4229 | |
| 4230 | return BB; |
| 4231 | } |
| 4232 | default: |
| 4233 | return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); |
| 4234 | } |
| 4235 | } |
| 4236 | |
| 4237 | bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const { |
| 4238 | return isTypeLegal(VT.getScalarType()); |
| 4239 | } |
| 4240 | |
| 4241 | bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { |
| 4242 | // This currently forces unfolding various combinations of fsub into fma with |
| 4243 | // free fneg'd operands. As long as we have fast FMA (controlled by |
| 4244 | // isFMAFasterThanFMulAndFAdd), we should perform these. |
| 4245 | |
| 4246 | // When fma is quarter rate, for f64 where add / sub are at best half rate, |
| 4247 | // most of these combines appear to be cycle neutral but save on instruction |
| 4248 | // count / code size. |
| 4249 | return true; |
| 4250 | } |
| 4251 | |
| 4252 | EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, |
| 4253 | EVT VT) const { |
| 4254 | if (!VT.isVector()) { |
| 4255 | return MVT::i1; |
| 4256 | } |
| 4257 | return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); |
| 4258 | } |
| 4259 | |
| 4260 | MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { |
| 4261 | // TODO: Should i16 be used always if legal? For now it would force VALU |
| 4262 | // shifts. |
| 4263 | return (VT == MVT::i16) ? MVT::i16 : MVT::i32; |
| 4264 | } |
| 4265 | |
| 4266 | LLT SITargetLowering::getPreferredShiftAmountTy(LLT Ty) const { |
| 4267 | return (Ty.getScalarSizeInBits() <= 16 && Subtarget->has16BitInsts()) |
| 4268 | ? Ty.changeElementSize(16) |
| 4269 | : Ty.changeElementSize(32); |
| 4270 | } |
| 4271 | |
// Answering this is somewhat tricky and depends on the specific device, as
// different devices have different rates for fma and for all f64 operations.
| 4274 | // |
| 4275 | // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other |
| 4276 | // regardless of which device (although the number of cycles differs between |
| 4277 | // devices), so it is always profitable for f64. |
| 4278 | // |
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
// only on full-rate devices. Normally, we should prefer selecting v_mad_f32,
// which we can always do even without fused FP ops since it returns the same
// result as the separate operations and is always full rate. Therefore, we
// lie and report that fma is not faster for f32. However, v_mad_f32 does not
// support denormals, so we do report fma as faster on devices with fast fma
// when denormals are required.
| 4286 | // |
| 4287 | bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, |
| 4288 | EVT VT) const { |
| 4289 | VT = VT.getScalarType(); |
| 4290 | |
| 4291 | switch (VT.getSimpleVT().SimpleTy) { |
| 4292 | case MVT::f32: { |
// If mad is not available, this depends only on whether f32 fma is full rate.
| 4294 | if (!Subtarget->hasMadMacF32Insts()) |
| 4295 | return Subtarget->hasFastFMAF32(); |
| 4296 | |
// Otherwise f32 mad is always full rate and returns the same result as
// the separate operations, so it should be preferred over fma.
// However, mad does not support denormals.
| 4300 | if (hasFP32Denormals(MF)) |
| 4301 | return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts(); |
| 4302 | |
| 4303 | // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32. |
| 4304 | return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts(); |
| 4305 | } |
| 4306 | case MVT::f64: |
| 4307 | return true; |
| 4308 | case MVT::f16: |
| 4309 | return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF); |
| 4310 | default: |
| 4311 | break; |
| 4312 | } |
| 4313 | |
| 4314 | return false; |
| 4315 | } |
| 4316 | |
| 4317 | bool SITargetLowering::isFMADLegal(const SelectionDAG &DAG, |
| 4318 | const SDNode *N) const { |
| 4319 | // TODO: Check future ftz flag |
| 4320 | // v_mad_f32/v_mac_f32 do not support denormals. |
| 4321 | EVT VT = N->getValueType(0); |
| 4322 | if (VT == MVT::f32) |
| 4323 | return Subtarget->hasMadMacF32Insts() && |
| 4324 | !hasFP32Denormals(DAG.getMachineFunction()); |
| 4325 | if (VT == MVT::f16) { |
| 4326 | return Subtarget->hasMadF16() && |
| 4327 | !hasFP64FP16Denormals(DAG.getMachineFunction()); |
| 4328 | } |
| 4329 | |
| 4330 | return false; |
| 4331 | } |
| 4332 | |
| 4333 | //===----------------------------------------------------------------------===// |
| 4334 | // Custom DAG Lowering Operations |
| 4335 | //===----------------------------------------------------------------------===// |
| 4336 | |
| 4337 | // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the |
| 4338 | // wider vector type is legal. |
| 4339 | SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op, |
| 4340 | SelectionDAG &DAG) const { |
| 4341 | unsigned Opc = Op.getOpcode(); |
| 4342 | EVT VT = Op.getValueType(); |
| 4343 | assert(VT == MVT::v4f16 || VT == MVT::v4i16); |
| 4344 | |
| 4345 | SDValue Lo, Hi; |
| 4346 | std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); |
| 4347 | |
| 4348 | SDLoc SL(Op); |
| 4349 | SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo, |
| 4350 | Op->getFlags()); |
| 4351 | SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi, |
| 4352 | Op->getFlags()); |
| 4353 | |
| 4354 | return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); |
| 4355 | } |
| 4356 | |
| 4357 | // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the |
| 4358 | // wider vector type is legal. |
| 4359 | SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op, |
| 4360 | SelectionDAG &DAG) const { |
| 4361 | unsigned Opc = Op.getOpcode(); |
| 4362 | EVT VT = Op.getValueType(); |
| 4363 | assert(VT == MVT::v4i16 || VT == MVT::v4f16); |
| 4364 | |
| 4365 | SDValue Lo0, Hi0; |
| 4366 | std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0); |
| 4367 | SDValue Lo1, Hi1; |
| 4368 | std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); |
| 4369 | |
| 4370 | SDLoc SL(Op); |
| 4371 | |
| 4372 | SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, |
| 4373 | Op->getFlags()); |
| 4374 | SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, |
| 4375 | Op->getFlags()); |
| 4376 | |
| 4377 | return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); |
| 4378 | } |
| 4379 | |
| 4380 | SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op, |
| 4381 | SelectionDAG &DAG) const { |
| 4382 | unsigned Opc = Op.getOpcode(); |
| 4383 | EVT VT = Op.getValueType(); |
| 4384 | assert(VT == MVT::v4i16 || VT == MVT::v4f16); |
| 4385 | |
| 4386 | SDValue Lo0, Hi0; |
| 4387 | std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0); |
| 4388 | SDValue Lo1, Hi1; |
| 4389 | std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); |
| 4390 | SDValue Lo2, Hi2; |
| 4391 | std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2); |
| 4392 | |
| 4393 | SDLoc SL(Op); |
| 4394 | |
| 4395 | SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2, |
| 4396 | Op->getFlags()); |
| 4397 | SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2, |
| 4398 | Op->getFlags()); |
| 4399 | |
| 4400 | return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); |
| 4401 | } |
| 4402 | |
| 4403 | |
| 4404 | SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
| 4405 | switch (Op.getOpcode()) { |
| 4406 | default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); |
| 4407 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
| 4408 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
| 4409 | case ISD::LOAD: { |
| 4410 | SDValue Result = LowerLOAD(Op, DAG); |
| 4411 | assert((!Result.getNode() || |
| 4412 | Result.getNode()->getNumValues() == 2) && |
| 4413 | "Load should return a value and a chain" ); |
| 4414 | return Result; |
| 4415 | } |
| 4416 | |
| 4417 | case ISD::FSIN: |
| 4418 | case ISD::FCOS: |
| 4419 | return LowerTrig(Op, DAG); |
| 4420 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
| 4421 | case ISD::FDIV: return LowerFDIV(Op, DAG); |
| 4422 | case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); |
| 4423 | case ISD::STORE: return LowerSTORE(Op, DAG); |
| 4424 | case ISD::GlobalAddress: { |
| 4425 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4426 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 4427 | return LowerGlobalAddress(MFI, Op, DAG); |
| 4428 | } |
| 4429 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); |
| 4430 | case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); |
| 4431 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); |
| 4432 | case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); |
| 4433 | case ISD::INSERT_SUBVECTOR: |
| 4434 | return lowerINSERT_SUBVECTOR(Op, DAG); |
| 4435 | case ISD::INSERT_VECTOR_ELT: |
| 4436 | return lowerINSERT_VECTOR_ELT(Op, DAG); |
| 4437 | case ISD::EXTRACT_VECTOR_ELT: |
| 4438 | return lowerEXTRACT_VECTOR_ELT(Op, DAG); |
| 4439 | case ISD::VECTOR_SHUFFLE: |
| 4440 | return lowerVECTOR_SHUFFLE(Op, DAG); |
| 4441 | case ISD::BUILD_VECTOR: |
| 4442 | return lowerBUILD_VECTOR(Op, DAG); |
| 4443 | case ISD::FP_ROUND: |
| 4444 | return lowerFP_ROUND(Op, DAG); |
| 4445 | case ISD::TRAP: |
| 4446 | return lowerTRAP(Op, DAG); |
| 4447 | case ISD::DEBUGTRAP: |
| 4448 | return lowerDEBUGTRAP(Op, DAG); |
| 4449 | case ISD::FABS: |
| 4450 | case ISD::FNEG: |
| 4451 | case ISD::FCANONICALIZE: |
| 4452 | case ISD::BSWAP: |
| 4453 | return splitUnaryVectorOp(Op, DAG); |
| 4454 | case ISD::FMINNUM: |
| 4455 | case ISD::FMAXNUM: |
| 4456 | return lowerFMINNUM_FMAXNUM(Op, DAG); |
| 4457 | case ISD::FMA: |
| 4458 | return splitTernaryVectorOp(Op, DAG); |
| 4459 | case ISD::SHL: |
| 4460 | case ISD::SRA: |
| 4461 | case ISD::SRL: |
| 4462 | case ISD::ADD: |
| 4463 | case ISD::SUB: |
| 4464 | case ISD::MUL: |
| 4465 | case ISD::SMIN: |
| 4466 | case ISD::SMAX: |
| 4467 | case ISD::UMIN: |
| 4468 | case ISD::UMAX: |
| 4469 | case ISD::FADD: |
| 4470 | case ISD::FMUL: |
| 4471 | case ISD::FMINNUM_IEEE: |
| 4472 | case ISD::FMAXNUM_IEEE: |
| 4473 | case ISD::UADDSAT: |
| 4474 | case ISD::USUBSAT: |
| 4475 | case ISD::SADDSAT: |
| 4476 | case ISD::SSUBSAT: |
| 4477 | return splitBinaryVectorOp(Op, DAG); |
| 4478 | case ISD::SMULO: |
| 4479 | case ISD::UMULO: |
| 4480 | return lowerXMULO(Op, DAG); |
| 4481 | case ISD::DYNAMIC_STACKALLOC: |
| 4482 | return LowerDYNAMIC_STACKALLOC(Op, DAG); |
| 4483 | } |
| 4484 | return SDValue(); |
| 4485 | } |
| 4486 | |
// Used for D16: Casts the result of an instruction into the right vector and
// packs the values if the load returns unpacked values.
| 4489 | static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT, |
| 4490 | const SDLoc &DL, |
| 4491 | SelectionDAG &DAG, bool Unpacked) { |
| 4492 | if (!LoadVT.isVector()) |
| 4493 | return Result; |
| 4494 | |
// Cast back to the original packed type or to a larger type that is a
// multiple of 32 bits for D16. Widening the return type is required for
// legalization.
| 4498 | EVT FittingLoadVT = LoadVT; |
| 4499 | if ((LoadVT.getVectorNumElements() % 2) == 1) { |
| 4500 | FittingLoadVT = |
| 4501 | EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(), |
| 4502 | LoadVT.getVectorNumElements() + 1); |
| 4503 | } |
| 4504 | |
| 4505 | if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16. |
| 4506 | // Truncate to v2i16/v4i16. |
| 4507 | EVT IntLoadVT = FittingLoadVT.changeTypeToInteger(); |
| 4508 | |
// Work around the legalizer neither scalarizing the truncate after vector op
// legalization nor creating an intermediate vector trunc.
| 4511 | SmallVector<SDValue, 4> Elts; |
| 4512 | DAG.ExtractVectorElements(Result, Elts); |
| 4513 | for (SDValue &Elt : Elts) |
| 4514 | Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt); |
| 4515 | |
// Pad illegal v1i16/v3i16 to v4i16
| 4517 | if ((LoadVT.getVectorNumElements() % 2) == 1) |
| 4518 | Elts.push_back(DAG.getUNDEF(MVT::i16)); |
| 4519 | |
| 4520 | Result = DAG.getBuildVector(IntLoadVT, DL, Elts); |
| 4521 | |
| 4522 | // Bitcast to original type (v2f16/v4f16). |
| 4523 | return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result); |
| 4524 | } |
| 4525 | |
| 4526 | // Cast back to the original packed type. |
| 4527 | return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result); |
| 4528 | } |
| 4529 | |
| 4530 | SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode, |
| 4531 | MemSDNode *M, |
| 4532 | SelectionDAG &DAG, |
| 4533 | ArrayRef<SDValue> Ops, |
| 4534 | bool IsIntrinsic) const { |
| 4535 | SDLoc DL(M); |
| 4536 | |
| 4537 | bool Unpacked = Subtarget->hasUnpackedD16VMem(); |
| 4538 | EVT LoadVT = M->getValueType(0); |
| 4539 | |
| 4540 | EVT EquivLoadVT = LoadVT; |
| 4541 | if (LoadVT.isVector()) { |
| 4542 | if (Unpacked) { |
| 4543 | EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, |
| 4544 | LoadVT.getVectorNumElements()); |
| 4545 | } else if ((LoadVT.getVectorNumElements() % 2) == 1) { |
| 4546 | // Widen v3f16 to legal type |
| 4547 | EquivLoadVT = |
| 4548 | EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(), |
| 4549 | LoadVT.getVectorNumElements() + 1); |
| 4550 | } |
| 4551 | } |
| 4552 | |
| 4553 | // Change from v4f16/v2f16 to EquivLoadVT. |
| 4554 | SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other); |
| 4555 | |
| 4556 | SDValue Load |
| 4557 | = DAG.getMemIntrinsicNode( |
| 4558 | IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL, |
| 4559 | VTList, Ops, M->getMemoryVT(), |
| 4560 | M->getMemOperand()); |
| 4561 | |
| 4562 | SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked); |
| 4563 | |
| 4564 | return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL); |
| 4565 | } |
| 4566 | |
| 4567 | SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat, |
| 4568 | SelectionDAG &DAG, |
| 4569 | ArrayRef<SDValue> Ops) const { |
| 4570 | SDLoc DL(M); |
| 4571 | EVT LoadVT = M->getValueType(0); |
| 4572 | EVT EltType = LoadVT.getScalarType(); |
| 4573 | EVT IntVT = LoadVT.changeTypeToInteger(); |
| 4574 | |
| 4575 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); |
| 4576 | |
| 4577 | unsigned Opc = |
| 4578 | IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD; |
| 4579 | |
| 4580 | if (IsD16) { |
| 4581 | return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops); |
| 4582 | } |
| 4583 | |
| 4584 | // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics |
| 4585 | if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32) |
| 4586 | return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); |
| 4587 | |
| 4588 | if (isTypeLegal(LoadVT)) { |
| 4589 | return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT, |
| 4590 | M->getMemOperand(), DAG); |
| 4591 | } |
| 4592 | |
| 4593 | EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT); |
| 4594 | SDVTList VTList = DAG.getVTList(CastVT, MVT::Other); |
| 4595 | SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT, |
| 4596 | M->getMemOperand(), DAG); |
| 4597 | return DAG.getMergeValues( |
| 4598 | {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)}, |
| 4599 | DL); |
| 4600 | } |
| 4601 | |
| 4602 | static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI, |
| 4603 | SDNode *N, SelectionDAG &DAG) { |
| 4604 | EVT VT = N->getValueType(0); |
| 4605 | const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); |
| 4606 | unsigned CondCode = CD->getZExtValue(); |
| 4607 | if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(CondCode))) |
| 4608 | return DAG.getUNDEF(VT); |
| 4609 | |
| 4610 | ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); |
| 4611 | |
| 4612 | SDValue LHS = N->getOperand(1); |
| 4613 | SDValue RHS = N->getOperand(2); |
| 4614 | |
| 4615 | SDLoc DL(N); |
| 4616 | |
| 4617 | EVT CmpVT = LHS.getValueType(); |
| 4618 | if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) { |
| 4619 | unsigned PromoteOp = ICmpInst::isSigned(IcInput) ? |
| 4620 | ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
| 4621 | LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS); |
| 4622 | RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS); |
| 4623 | } |
| 4624 | |
| 4625 | ISD::CondCode CCOpcode = getICmpCondCode(IcInput); |
| 4626 | |
| 4627 | unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); |
| 4628 | EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); |
| 4629 | |
| 4630 | SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS, |
| 4631 | DAG.getCondCode(CCOpcode)); |
| 4632 | if (VT.bitsEq(CCVT)) |
| 4633 | return SetCC; |
| 4634 | return DAG.getZExtOrTrunc(SetCC, DL, VT); |
| 4635 | } |
| 4636 | |
| 4637 | static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI, |
| 4638 | SDNode *N, SelectionDAG &DAG) { |
| 4639 | EVT VT = N->getValueType(0); |
| 4640 | const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); |
| 4641 | |
| 4642 | unsigned CondCode = CD->getZExtValue(); |
| 4643 | if (!FCmpInst::isFPPredicate(static_cast<FCmpInst::Predicate>(CondCode))) |
| 4644 | return DAG.getUNDEF(VT); |
| 4645 | |
| 4646 | SDValue Src0 = N->getOperand(1); |
| 4647 | SDValue Src1 = N->getOperand(2); |
| 4648 | EVT CmpVT = Src0.getValueType(); |
| 4649 | SDLoc SL(N); |
| 4650 | |
| 4651 | if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) { |
| 4652 | Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); |
| 4653 | Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); |
| 4654 | } |
| 4655 | |
| 4656 | FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); |
| 4657 | ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); |
| 4658 | unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); |
| 4659 | EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); |
| 4660 | SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0, |
| 4661 | Src1, DAG.getCondCode(CCOpcode)); |
| 4662 | if (VT.bitsEq(CCVT)) |
| 4663 | return SetCC; |
| 4664 | return DAG.getZExtOrTrunc(SetCC, SL, VT); |
| 4665 | } |
| 4666 | |
| 4667 | static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N, |
| 4668 | SelectionDAG &DAG) { |
| 4669 | EVT VT = N->getValueType(0); |
| 4670 | SDValue Src = N->getOperand(1); |
| 4671 | SDLoc SL(N); |
| 4672 | |
| 4673 | if (Src.getOpcode() == ISD::SETCC) { |
| 4674 | // (ballot (ISD::SETCC ...)) -> (AMDGPUISD::SETCC ...) |
| 4675 | return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src.getOperand(0), |
| 4676 | Src.getOperand(1), Src.getOperand(2)); |
| 4677 | } |
| 4678 | if (const ConstantSDNode *Arg = dyn_cast<ConstantSDNode>(Src)) { |
| 4679 | // (ballot 0) -> 0 |
| 4680 | if (Arg->isNullValue()) |
| 4681 | return DAG.getConstant(0, SL, VT); |
| 4682 | |
| 4683 | // (ballot 1) -> EXEC/EXEC_LO |
| 4684 | if (Arg->isOne()) { |
| 4685 | Register Exec; |
| 4686 | if (VT.getScalarSizeInBits() == 32) |
| 4687 | Exec = AMDGPU::EXEC_LO; |
| 4688 | else if (VT.getScalarSizeInBits() == 64) |
| 4689 | Exec = AMDGPU::EXEC; |
| 4690 | else |
| 4691 | return SDValue(); |
| 4692 | |
| 4693 | return DAG.getCopyFromReg(DAG.getEntryNode(), SL, Exec, VT); |
| 4694 | } |
| 4695 | } |
| 4696 | |
| 4697 | // (ballot (i1 $src)) -> (AMDGPUISD::SETCC (i32 (zext $src)) (i32 0) |
| 4698 | // ISD::SETNE) |
| 4699 | return DAG.getNode( |
| 4700 | AMDGPUISD::SETCC, SL, VT, DAG.getZExtOrTrunc(Src, SL, MVT::i32), |
| 4701 | DAG.getConstant(0, SL, MVT::i32), DAG.getCondCode(ISD::SETNE)); |
| 4702 | } |
| 4703 | |
| 4704 | void SITargetLowering::ReplaceNodeResults(SDNode *N, |
| 4705 | SmallVectorImpl<SDValue> &Results, |
| 4706 | SelectionDAG &DAG) const { |
| 4707 | switch (N->getOpcode()) { |
| 4708 | case ISD::INSERT_VECTOR_ELT: { |
| 4709 | if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) |
| 4710 | Results.push_back(Res); |
| 4711 | return; |
| 4712 | } |
| 4713 | case ISD::EXTRACT_VECTOR_ELT: { |
| 4714 | if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) |
| 4715 | Results.push_back(Res); |
| 4716 | return; |
| 4717 | } |
| 4718 | case ISD::INTRINSIC_WO_CHAIN: { |
| 4719 | unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); |
| 4720 | switch (IID) { |
| 4721 | case Intrinsic::amdgcn_cvt_pkrtz: { |
| 4722 | SDValue Src0 = N->getOperand(1); |
| 4723 | SDValue Src1 = N->getOperand(2); |
| 4724 | SDLoc SL(N); |
| 4725 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, |
| 4726 | Src0, Src1); |
| 4727 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); |
| 4728 | return; |
| 4729 | } |
| 4730 | case Intrinsic::amdgcn_cvt_pknorm_i16: |
| 4731 | case Intrinsic::amdgcn_cvt_pknorm_u16: |
| 4732 | case Intrinsic::amdgcn_cvt_pk_i16: |
| 4733 | case Intrinsic::amdgcn_cvt_pk_u16: { |
| 4734 | SDValue Src0 = N->getOperand(1); |
| 4735 | SDValue Src1 = N->getOperand(2); |
| 4736 | SDLoc SL(N); |
| 4737 | unsigned Opcode; |
| 4738 | |
| 4739 | if (IID == Intrinsic::amdgcn_cvt_pknorm_i16) |
| 4740 | Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; |
| 4741 | else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16) |
| 4742 | Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; |
| 4743 | else if (IID == Intrinsic::amdgcn_cvt_pk_i16) |
| 4744 | Opcode = AMDGPUISD::CVT_PK_I16_I32; |
| 4745 | else |
| 4746 | Opcode = AMDGPUISD::CVT_PK_U16_U32; |
| 4747 | |
| 4748 | EVT VT = N->getValueType(0); |
| 4749 | if (isTypeLegal(VT)) |
| 4750 | Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1)); |
| 4751 | else { |
| 4752 | SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1); |
| 4753 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt)); |
| 4754 | } |
| 4755 | return; |
| 4756 | } |
| 4757 | } |
| 4758 | break; |
| 4759 | } |
| 4760 | case ISD::INTRINSIC_W_CHAIN: { |
| 4761 | if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) { |
| 4762 | if (Res.getOpcode() == ISD::MERGE_VALUES) { |
| 4763 | // FIXME: Hacky |
| 4764 | for (unsigned I = 0; I < Res.getNumOperands(); I++) { |
| 4765 | Results.push_back(Res.getOperand(I)); |
| 4766 | } |
| 4767 | } else { |
| 4768 | Results.push_back(Res); |
| 4769 | Results.push_back(Res.getValue(1)); |
| 4770 | } |
| 4771 | return; |
| 4772 | } |
| 4773 | |
| 4774 | break; |
| 4775 | } |
| 4776 | case ISD::SELECT: { |
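// Lower an illegal-typed select by bitcasting the operands to an equivalent
// integer type, selecting in that type, and bitcasting the result back.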
| 4777 | SDLoc SL(N); |
| 4778 | EVT VT = N->getValueType(0); |
| 4779 | EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); |
| 4780 | SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); |
| 4781 | SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); |
| 4782 | |
| 4783 | EVT SelectVT = NewVT; |
| 4784 | if (NewVT.bitsLT(MVT::i32)) { |
| 4785 | LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); |
| 4786 | RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); |
| 4787 | SelectVT = MVT::i32; |
| 4788 | } |
| 4789 | |
| 4790 | SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, |
| 4791 | N->getOperand(0), LHS, RHS); |
| 4792 | |
| 4793 | if (NewVT != SelectVT) |
| 4794 | NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); |
| 4795 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); |
| 4796 | return; |
| 4797 | } |
| 4798 | case ISD::FNEG: { |
| 4799 | if (N->getValueType(0) != MVT::v2f16) |
| 4800 | break; |
| 4801 | |
| 4802 | SDLoc SL(N); |
| 4803 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); |
| 4804 | |
| 4805 | SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32, |
| 4806 | BC, |
| 4807 | DAG.getConstant(0x80008000, SL, MVT::i32)); |
| 4808 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); |
| 4809 | return; |
| 4810 | } |
| 4811 | case ISD::FABS: { |
| 4812 | if (N->getValueType(0) != MVT::v2f16) |
| 4813 | break; |
| 4814 | |
| 4815 | SDLoc SL(N); |
| 4816 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); |
| 4817 | |
| 4818 | SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32, |
| 4819 | BC, |
| 4820 | DAG.getConstant(0x7fff7fff, SL, MVT::i32)); |
| 4821 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); |
| 4822 | return; |
| 4823 | } |
| 4824 | default: |
| 4825 | break; |
| 4826 | } |
| 4827 | } |
| 4828 | |
| 4829 | /// Helper function for LowerBRCOND |
| 4830 | static SDNode *findUser(SDValue Value, unsigned Opcode) { |
| 4831 | |
| 4832 | SDNode *Parent = Value.getNode(); |
| 4833 | for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); |
| 4834 | I != E; ++I) { |
| 4835 | |
| 4836 | if (I.getUse().get() != Value) |
| 4837 | continue; |
| 4838 | |
| 4839 | if (I->getOpcode() == Opcode) |
| 4840 | return *I; |
| 4841 | } |
| 4842 | return nullptr; |
| 4843 | } |
| 4844 | |
| 4845 | unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { |
| 4846 | if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { |
| 4847 | switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { |
| 4848 | case Intrinsic::amdgcn_if: |
| 4849 | return AMDGPUISD::IF; |
| 4850 | case Intrinsic::amdgcn_else: |
| 4851 | return AMDGPUISD::ELSE; |
| 4852 | case Intrinsic::amdgcn_loop: |
| 4853 | return AMDGPUISD::LOOP; |
| 4854 | case Intrinsic::amdgcn_end_cf: |
| 4855 | llvm_unreachable("should not occur" ); |
| 4856 | default: |
| 4857 | return 0; |
| 4858 | } |
| 4859 | } |
| 4860 | |
| 4861 | // break, if_break, else_break are all only used as inputs to loop, not |
| 4862 | // directly as branch conditions. |
| 4863 | return 0; |
| 4864 | } |
| 4865 | |
| 4866 | bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { |
| 4867 | const Triple &TT = getTargetMachine().getTargetTriple(); |
| 4868 | return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || |
| 4869 | GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && |
| 4870 | AMDGPU::shouldEmitConstantsToTextSection(TT); |
| 4871 | } |
| 4872 | |
| 4873 | bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { |
| 4874 | // FIXME: Either avoid relying on address space here or change the default |
| 4875 | // address space for functions to avoid the explicit check. |
| 4876 | return (GV->getValueType()->isFunctionTy() || |
| 4877 | !isNonGlobalAddrSpace(GV->getAddressSpace())) && |
| 4878 | !shouldEmitFixup(GV) && |
| 4879 | !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); |
| 4880 | } |
| 4881 | |
| 4882 | bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { |
| 4883 | return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); |
| 4884 | } |
| 4885 | |
| 4886 | bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const { |
| 4887 | if (!GV->hasExternalLinkage()) |
| 4888 | return true; |
| 4889 | |
| 4890 | const auto OS = getTargetMachine().getTargetTriple().getOS(); |
| 4891 | return OS == Triple::AMDHSA || OS == Triple::AMDPAL; |
| 4892 | } |
| 4893 | |
/// This transforms the control flow intrinsics to get the branch destination
/// as their last parameter, and also switches the branch target with BR if
/// the need arises.
| 4896 | SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, |
| 4897 | SelectionDAG &DAG) const { |
| 4898 | SDLoc DL(BRCOND); |
| 4899 | |
| 4900 | SDNode *Intr = BRCOND.getOperand(1).getNode(); |
| 4901 | SDValue Target = BRCOND.getOperand(2); |
| 4902 | SDNode *BR = nullptr; |
| 4903 | SDNode *SetCC = nullptr; |
| 4904 | |
| 4905 | if (Intr->getOpcode() == ISD::SETCC) { |
| 4906 | // As long as we negate the condition everything is fine |
| 4907 | SetCC = Intr; |
| 4908 | Intr = SetCC->getOperand(0).getNode(); |
| 4909 | |
| 4910 | } else { |
| 4911 | // Get the target from BR if we don't negate the condition |
| 4912 | BR = findUser(BRCOND, ISD::BR); |
| 4913 | assert(BR && "brcond missing unconditional branch user" ); |
| 4914 | Target = BR->getOperand(1); |
| 4915 | } |
| 4916 | |
| 4917 | unsigned CFNode = isCFIntrinsic(Intr); |
| 4918 | if (CFNode == 0) { |
| 4919 | // This is a uniform branch so we don't need to legalize. |
| 4920 | return BRCOND; |
| 4921 | } |
| 4922 | |
| 4923 | bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || |
| 4924 | Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; |
| 4925 | |
| 4926 | assert(!SetCC || |
| 4927 | (SetCC->getConstantOperandVal(1) == 1 && |
| 4928 | cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == |
| 4929 | ISD::SETNE)); |
| 4930 | |
| 4931 | // operands of the new intrinsic call |
| 4932 | SmallVector<SDValue, 4> Ops; |
| 4933 | if (HaveChain) |
| 4934 | Ops.push_back(BRCOND.getOperand(0)); |
| 4935 | |
| 4936 | Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end()); |
| 4937 | Ops.push_back(Target); |
| 4938 | |
| 4939 | ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); |
| 4940 | |
| 4941 | // build the new intrinsic call |
| 4942 | SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); |
| 4943 | |
| 4944 | if (!HaveChain) { |
| 4945 | SDValue Ops[] = { |
| 4946 | SDValue(Result, 0), |
| 4947 | BRCOND.getOperand(0) |
| 4948 | }; |
| 4949 | |
| 4950 | Result = DAG.getMergeValues(Ops, DL).getNode(); |
| 4951 | } |
| 4952 | |
| 4953 | if (BR) { |
| 4954 | // Give the branch instruction our target |
| 4955 | SDValue Ops[] = { |
| 4956 | BR->getOperand(0), |
| 4957 | BRCOND.getOperand(2) |
| 4958 | }; |
| 4959 | SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); |
| 4960 | DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); |
| 4961 | } |
| 4962 | |
| 4963 | SDValue Chain = SDValue(Result, Result->getNumValues() - 1); |
| 4964 | |
| 4965 | // Copy the intrinsic results to registers |
| 4966 | for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { |
| 4967 | SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); |
| 4968 | if (!CopyToReg) |
| 4969 | continue; |
| 4970 | |
| 4971 | Chain = DAG.getCopyToReg( |
| 4972 | Chain, DL, |
| 4973 | CopyToReg->getOperand(1), |
| 4974 | SDValue(Result, i - 1), |
| 4975 | SDValue()); |
| 4976 | |
| 4977 | DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); |
| 4978 | } |
| 4979 | |
| 4980 | // Remove the old intrinsic from the chain |
| 4981 | DAG.ReplaceAllUsesOfValueWith( |
| 4982 | SDValue(Intr, Intr->getNumValues() - 1), |
| 4983 | Intr->getOperand(0)); |
| 4984 | |
| 4985 | return Chain; |
| 4986 | } |
| 4987 | |
| 4988 | SDValue SITargetLowering::LowerRETURNADDR(SDValue Op, |
| 4989 | SelectionDAG &DAG) const { |
| 4990 | MVT VT = Op.getSimpleValueType(); |
| 4991 | SDLoc DL(Op); |
// Only the return address for the current frame (depth 0) is supported.
| 4993 | if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) |
| 4994 | return DAG.getConstant(0, DL, VT); |
| 4995 | |
| 4996 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4997 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 4998 | // Check for kernel and shader functions |
| 4999 | if (Info->isEntryFunction()) |
| 5000 | return DAG.getConstant(0, DL, VT); |
| 5001 | |
| 5002 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 5003 | // There is a call to @llvm.returnaddress in this function |
| 5004 | MFI.setReturnAddressIsTaken(true); |
| 5005 | |
| 5006 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
| 5007 | // Get the return address reg and mark it as an implicit live-in |
Register Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
getRegClassFor(VT, Op.getNode()->isDivergent()));
| 5009 | |
| 5010 | return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); |
| 5011 | } |
| 5012 | |
| 5013 | SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG, |
| 5014 | SDValue Op, |
| 5015 | const SDLoc &DL, |
| 5016 | EVT VT) const { |
| 5017 | return Op.getValueType().bitsLE(VT) ? |
| 5018 | DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : |
| 5019 | DAG.getNode(ISD::FP_ROUND, DL, VT, Op, |
| 5020 | DAG.getTargetConstant(0, DL, MVT::i32)); |
| 5021 | } |
| 5022 | |
| 5023 | SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
| 5024 | assert(Op.getValueType() == MVT::f16 && |
| 5025 | "Do not know how to custom lower FP_ROUND for non-f16 type" ); |
| 5026 | |
| 5027 | SDValue Src = Op.getOperand(0); |
| 5028 | EVT SrcVT = Src.getValueType(); |
| 5029 | if (SrcVT != MVT::f64) |
| 5030 | return Op; |
| 5031 | |
| 5032 | SDLoc DL(Op); |
| 5033 | |
| 5034 | SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); |
| 5035 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); |
| 5036 | return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); |
| 5037 | } |
| 5038 | |
| 5039 | SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op, |
| 5040 | SelectionDAG &DAG) const { |
| 5041 | EVT VT = Op.getValueType(); |
| 5042 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 5043 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 5044 | bool IsIEEEMode = Info->getMode().IEEE; |
| 5045 | |
| 5046 | // FIXME: Assert during selection that this is only selected for |
| 5047 | // ieee_mode. Currently a combine can produce the ieee version for non-ieee |
| 5048 | // mode functions, but this happens to be OK since it's only done in cases |
| 5049 | // where there is known no sNaN. |
| 5050 | if (IsIEEEMode) |
| 5051 | return expandFMINNUM_FMAXNUM(Op.getNode(), DAG); |
| 5052 | |
| 5053 | if (VT == MVT::v4f16) |
| 5054 | return splitBinaryVectorOp(Op, DAG); |
| 5055 | return Op; |
| 5056 | } |
| 5057 | |
| 5058 | SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const { |
| 5059 | EVT VT = Op.getValueType(); |
| 5060 | SDLoc SL(Op); |
| 5061 | SDValue LHS = Op.getOperand(0); |
| 5062 | SDValue RHS = Op.getOperand(1); |
| 5063 | bool isSigned = Op.getOpcode() == ISD::SMULO; |
| 5064 | |
| 5065 | if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) { |
| 5066 | const APInt &C = RHSC->getAPIntValue(); |
| 5067 | // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X } |
| 5068 | if (C.isPowerOf2()) { |
// smulo(x, signed_min) is the same as umulo(x, signed_min).
| 5070 | bool UseArithShift = isSigned && !C.isMinSignedValue(); |
| 5071 | SDValue ShiftAmt = DAG.getConstant(C.logBase2(), SL, MVT::i32); |
| 5072 | SDValue Result = DAG.getNode(ISD::SHL, SL, VT, LHS, ShiftAmt); |
| 5073 | SDValue Overflow = DAG.getSetCC(SL, MVT::i1, |
| 5074 | DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL, |
| 5075 | SL, VT, Result, ShiftAmt), |
| 5076 | LHS, ISD::SETNE); |
| 5077 | return DAG.getMergeValues({ Result, Overflow }, SL); |
| 5078 | } |
| 5079 | } |
| 5080 | |
| 5081 | SDValue Result = DAG.getNode(ISD::MUL, SL, VT, LHS, RHS); |
| 5082 | SDValue Top = DAG.getNode(isSigned ? ISD::MULHS : ISD::MULHU, |
| 5083 | SL, VT, LHS, RHS); |
| 5084 | |
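// Generic expansion: overflow occurred iff the high half of the full product
// differs from the sign of the low result (signed) or from zero (unsigned).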
| 5085 | SDValue Sign = isSigned |
| 5086 | ? DAG.getNode(ISD::SRA, SL, VT, Result, |
| 5087 | DAG.getConstant(VT.getScalarSizeInBits() - 1, SL, MVT::i32)) |
| 5088 | : DAG.getConstant(0, SL, VT); |
| 5089 | SDValue Overflow = DAG.getSetCC(SL, MVT::i1, Top, Sign, ISD::SETNE); |
| 5090 | |
| 5091 | return DAG.getMergeValues({ Result, Overflow }, SL); |
| 5092 | } |
| 5093 | |
| 5094 | SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { |
| 5095 | SDLoc SL(Op); |
| 5096 | SDValue Chain = Op.getOperand(0); |
| 5097 | |
| 5098 | if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || |
| 5099 | !Subtarget->isTrapHandlerEnabled()) |
| 5100 | return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); |
| 5101 | |
| 5102 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5103 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 5104 | Register UserSGPR = Info->getQueuePtrUserSGPR(); |
| 5105 | assert(UserSGPR != AMDGPU::NoRegister); |
| 5106 | SDValue QueuePtr = CreateLiveInRegister( |
| 5107 | DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); |
| 5108 | SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64); |
| 5109 | SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01, |
| 5110 | QueuePtr, SDValue()); |
| 5111 | SDValue Ops[] = { |
| 5112 | ToReg, |
| 5113 | DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16), |
| 5114 | SGPR01, |
| 5115 | ToReg.getValue(1) |
| 5116 | }; |
| 5117 | return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); |
| 5118 | } |
| 5119 | |
| 5120 | SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const { |
| 5121 | SDLoc SL(Op); |
| 5122 | SDValue Chain = Op.getOperand(0); |
| 5123 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5124 | |
| 5125 | if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || |
| 5126 | !Subtarget->isTrapHandlerEnabled()) { |
| 5127 | DiagnosticInfoUnsupported NoTrap(MF.getFunction(), |
| 5128 | "debugtrap handler not supported" , |
| 5129 | Op.getDebugLoc(), |
| 5130 | DS_Warning); |
| 5131 | LLVMContext &Ctx = MF.getFunction().getContext(); |
| 5132 | Ctx.diagnose(NoTrap); |
| 5133 | return Chain; |
| 5134 | } |
| 5135 | |
| 5136 | SDValue Ops[] = { |
| 5137 | Chain, |
| 5138 | DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16) |
| 5139 | }; |
| 5140 | return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); |
| 5141 | } |
| 5142 | |
| 5143 | SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, |
| 5144 | SelectionDAG &DAG) const { |
| 5145 | // FIXME: Use inline constants (src_{shared, private}_base) instead. |
| 5146 | if (Subtarget->hasApertureRegs()) { |
| 5147 | unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ? |
| 5148 | AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE : |
| 5149 | AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE; |
| 5150 | unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ? |
| 5151 | AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE : |
| 5152 | AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE; |
| 5153 | unsigned Encoding = |
| 5154 | AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ | |
| 5155 | Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ | |
| 5156 | WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_; |
| 5157 | |
| 5158 | SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); |
| 5159 | SDValue ApertureReg = SDValue( |
| 5160 | DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); |
| 5161 | SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); |
| 5162 | return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); |
| 5163 | } |
| 5164 | |
| 5165 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5166 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 5167 | Register UserSGPR = Info->getQueuePtrUserSGPR(); |
| 5168 | assert(UserSGPR != AMDGPU::NoRegister); |
| 5169 | |
| 5170 | SDValue QueuePtr = CreateLiveInRegister( |
| 5171 | DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); |
| 5172 | |
| 5173 | // Offset into amd_queue_t for group_segment_aperture_base_hi / |
| 5174 | // private_segment_aperture_base_hi. |
| 5175 | uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; |
| 5176 | |
| 5177 | SDValue Ptr = |
| 5178 | DAG.getObjectPtrOffset(DL, QueuePtr, TypeSize::Fixed(StructOffset)); |
| 5179 | |
| 5180 | // TODO: Use custom target PseudoSourceValue. |
// TODO: We should use the value from the IR intrinsic call, but it might not
// be available, and it's not clear how we would get it here.
| 5183 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); |
| 5184 | return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, |
| 5185 | commonAlignment(Align(64), StructOffset), |
| 5186 | MachineMemOperand::MODereferenceable | |
| 5187 | MachineMemOperand::MOInvariant); |
| 5188 | } |
| 5189 | |
| 5190 | SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, |
| 5191 | SelectionDAG &DAG) const { |
| 5192 | SDLoc SL(Op); |
| 5193 | const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); |
| 5194 | |
| 5195 | SDValue Src = ASC->getOperand(0); |
| 5196 | SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); |
| 5197 | |
| 5198 | const AMDGPUTargetMachine &TM = |
| 5199 | static_cast<const AMDGPUTargetMachine &>(getTargetMachine()); |
| 5200 | |
| 5201 | // flat -> local/private |
| 5202 | if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { |
| 5203 | unsigned DestAS = ASC->getDestAddressSpace(); |
| 5204 | |
| 5205 | if (DestAS == AMDGPUAS::LOCAL_ADDRESS || |
| 5206 | DestAS == AMDGPUAS::PRIVATE_ADDRESS) { |
| 5207 | unsigned NullVal = TM.getNullPointerValue(DestAS); |
| 5208 | SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); |
| 5209 | SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); |
| 5210 | SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); |
| 5211 | |
| 5212 | return DAG.getNode(ISD::SELECT, SL, MVT::i32, |
| 5213 | NonNull, Ptr, SegmentNullPtr); |
| 5214 | } |
| 5215 | } |
| 5216 | |
| 5217 | // local/private -> flat |
| 5218 | if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { |
| 5219 | unsigned SrcAS = ASC->getSrcAddressSpace(); |
| 5220 | |
| 5221 | if (SrcAS == AMDGPUAS::LOCAL_ADDRESS || |
| 5222 | SrcAS == AMDGPUAS::PRIVATE_ADDRESS) { |
| 5223 | unsigned NullVal = TM.getNullPointerValue(SrcAS); |
| 5224 | SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); |
| 5225 | |
| 5226 | SDValue NonNull |
| 5227 | = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); |
| 5228 | |
| 5229 | SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG); |
| 5230 | SDValue CvtPtr |
| 5231 | = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); |
| 5232 | |
| 5233 | return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, |
| 5234 | DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), |
| 5235 | FlatNullPtr); |
| 5236 | } |
| 5237 | } |
| 5238 | |
| 5239 | if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT && |
| 5240 | Src.getValueType() == MVT::i64) |
| 5241 | return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); |
| 5242 | |
| 5243 | // global <-> flat are no-ops and never emitted. |
| 5244 | |
| 5245 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 5246 | DiagnosticInfoUnsupported InvalidAddrSpaceCast( |
| 5247 | MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
| 5248 | DAG.getContext()->diagnose(InvalidAddrSpaceCast); |
| 5249 | |
| 5250 | return DAG.getUNDEF(ASC->getValueType(0)); |
| 5251 | } |
| 5252 | |
| 5253 | // This lowers an INSERT_SUBVECTOR by extracting the individual elements from |
| 5254 | // the small vector and inserting them into the big vector. That is better than |
| 5255 | // the default expansion of doing it via a stack slot. Even though the use of |
| 5256 | // the stack slot would be optimized away afterwards, the stack slot itself |
| 5257 | // remains. |
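|      | // For example (an illustrative sketch), inserting a v2i16 into a v4i16 at
|      | // index 2 becomes:
|      | //   elt0 = extract_vector_elt ins, 0;  vec = insert_vector_elt vec, elt0, 2
|      | //   elt1 = extract_vector_elt ins, 1;  vec = insert_vector_elt vec, elt1, 3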
| 5258 | SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, |
| 5259 | SelectionDAG &DAG) const { |
| 5260 | SDValue Vec = Op.getOperand(0); |
| 5261 | SDValue Ins = Op.getOperand(1); |
| 5262 | SDValue Idx = Op.getOperand(2); |
| 5263 | EVT VecVT = Vec.getValueType(); |
| 5264 | EVT InsVT = Ins.getValueType(); |
| 5265 | EVT EltVT = VecVT.getVectorElementType(); |
| 5266 | unsigned InsNumElts = InsVT.getVectorNumElements(); |
| 5267 | unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); |
| 5268 | SDLoc SL(Op); |
| 5269 | |
| 5270 | for (unsigned I = 0; I != InsNumElts; ++I) { |
| 5271 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins, |
| 5272 | DAG.getConstant(I, SL, MVT::i32)); |
| 5273 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt, |
| 5274 | DAG.getConstant(IdxVal + I, SL, MVT::i32)); |
| 5275 | } |
| 5276 | return Vec; |
| 5277 | } |
| 5278 | |
| 5279 | SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, |
| 5280 | SelectionDAG &DAG) const { |
| 5281 | SDValue Vec = Op.getOperand(0); |
| 5282 | SDValue InsVal = Op.getOperand(1); |
| 5283 | SDValue Idx = Op.getOperand(2); |
| 5284 | EVT VecVT = Vec.getValueType(); |
| 5285 | EVT EltVT = VecVT.getVectorElementType(); |
| 5286 | unsigned VecSize = VecVT.getSizeInBits(); |
| 5287 | unsigned EltSize = EltVT.getSizeInBits(); |
| 5288 | 
| 5290 | assert(VecSize <= 64); |
| 5291 | |
| 5292 | unsigned NumElts = VecVT.getVectorNumElements(); |
| 5293 | SDLoc SL(Op); |
| 5294 | auto KIdx = dyn_cast<ConstantSDNode>(Idx); |
| 5295 | |
| 5296 | if (NumElts == 4 && EltSize == 16 && KIdx) { |
| 5297 | SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec); |
| 5298 | |
| 5299 | SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, |
| 5300 | DAG.getConstant(0, SL, MVT::i32)); |
| 5301 | SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, |
| 5302 | DAG.getConstant(1, SL, MVT::i32)); |
| 5303 | |
| 5304 | SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf); |
| 5305 | SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf); |
| 5306 | |
| 5307 | unsigned Idx = KIdx->getZExtValue(); |
| 5308 | bool InsertLo = Idx < 2; |
| 5309 | SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16, |
| 5310 | InsertLo ? LoVec : HiVec, |
| 5311 | DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal), |
| 5312 | DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32)); |
| 5313 | |
| 5314 | InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf); |
| 5315 | |
| 5316 | SDValue Concat = InsertLo ? |
| 5317 | DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) : |
| 5318 | DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf }); |
| 5319 | |
| 5320 | return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat); |
| 5321 | } |
| 5322 | |
| 5323 | if (isa<ConstantSDNode>(Idx)) |
| 5324 | return SDValue(); |
| 5325 | |
| 5326 | MVT IntVT = MVT::getIntegerVT(VecSize); |
| 5327 | |
| 5328 | // Avoid stack access for dynamic indexing. |
| 5329 | // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec |
| 5330 | |
| 5331 | // Create a congruent vector with the target value in each element so that |
| 5332 | // the required element can be masked and ORed into the target vector. |
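|      | // An illustrative sketch for <4 x i16> with a dynamic index idx (IntVT is
|      | // then i64 and log2(EltSize) == 4):
|      | //   bitidx = idx << 4
|      | //   mask   = 0xffff << bitidx
|      | //   result = (mask & splat(val)) | (~mask & bitcast(vec))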
| 5333 | SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT, |
| 5334 | DAG.getSplatBuildVector(VecVT, SL, InsVal)); |
| 5335 | |
| 5336 | assert(isPowerOf2_32(EltSize)); |
| 5337 | SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); |
| 5338 | |
| 5339 | // Convert vector index to bit-index. |
| 5340 | SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); |
| 5341 | |
| 5342 | SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); |
| 5343 | SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT, |
| 5344 | DAG.getConstant(0xffff, SL, IntVT), |
| 5345 | ScaledIdx); |
| 5346 | |
| 5347 | SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal); |
| 5348 | SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT, |
| 5349 | DAG.getNOT(SL, BFM, IntVT), BCVec); |
| 5350 | |
| 5351 | SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS); |
| 5352 | return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI); |
| 5353 | } |
| 5354 | |
| 5355 | SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
| 5356 | SelectionDAG &DAG) const {
| 5357 | SDLoc SL(Op); |
| 5358 | |
| 5359 | EVT ResultVT = Op.getValueType(); |
| 5360 | SDValue Vec = Op.getOperand(0); |
| 5361 | SDValue Idx = Op.getOperand(1); |
| 5362 | EVT VecVT = Vec.getValueType(); |
| 5363 | unsigned VecSize = VecVT.getSizeInBits(); |
| 5364 | EVT EltVT = VecVT.getVectorElementType(); |
| 5365 | assert(VecSize <= 64); |
| 5366 | |
| 5367 | DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr); |
| 5368 | |
| 5369 | // Make sure we do any optimizations that will make it easier to fold |
| 5370 | // source modifiers before obscuring it with bit operations. |
| 5371 | |
| 5372 | // XXX - Why doesn't this get called when vector_shuffle is expanded? |
| 5373 | if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI)) |
| 5374 | return Combined; |
| 5375 | |
| 5376 | unsigned EltSize = EltVT.getSizeInBits(); |
| 5377 | assert(isPowerOf2_32(EltSize)); |
| 5378 | |
| 5379 | MVT IntVT = MVT::getIntegerVT(VecSize); |
| 5380 | SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); |
| 5381 | |
| 5382 | // Convert vector index to bit-index (* EltSize) |
| 5383 | SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); |
| 5384 | |
| 5385 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); |
| 5386 | SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx); |
| 5387 | |
| 5388 | if (ResultVT == MVT::f16) { |
| 5389 | SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt); |
| 5390 | return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); |
| 5391 | } |
| 5392 | |
| 5393 | return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT); |
| 5394 | } |
| 5395 | |
| 5396 | static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) { |
| 5397 | assert(Elt % 2 == 0); |
| 5398 | return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0); |
| 5399 | } |
| 5400 | |
| 5401 | SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, |
| 5402 | SelectionDAG &DAG) const { |
| 5403 | SDLoc SL(Op); |
| 5404 | EVT ResultVT = Op.getValueType(); |
| 5405 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); |
| 5406 | |
| 5407 | EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16; |
| 5408 | EVT EltVT = PackVT.getVectorElementType(); |
| 5409 | int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements(); |
| 5410 | |
| 5411 | // vector_shuffle <0,1,6,7> lhs, rhs |
| 5412 | // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2) |
| 5413 | // |
| 5414 | // vector_shuffle <6,7,2,3> lhs, rhs |
| 5415 | // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2) |
| 5416 | // |
| 5417 | // vector_shuffle <6,7,0,1> lhs, rhs |
| 5418 | // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0) |
| 5419 | |
| 5420 | // Avoid scalarizing when both halves are reading from consecutive elements. |
| 5421 | SmallVector<SDValue, 4> Pieces; |
| 5422 | for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) { |
| 5423 | if (elementPairIsContiguous(SVN->getMask(), I)) { |
| 5424 | const int Idx = SVN->getMaskElt(I); |
| 5425 | int VecIdx = Idx < SrcNumElts ? 0 : 1; |
| 5426 | int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts; |
| 5427 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, |
| 5428 | PackVT, SVN->getOperand(VecIdx), |
| 5429 | DAG.getConstant(EltIdx, SL, MVT::i32)); |
| 5430 | Pieces.push_back(SubVec); |
| 5431 | } else { |
| 5432 | const int Idx0 = SVN->getMaskElt(I); |
| 5433 | const int Idx1 = SVN->getMaskElt(I + 1); |
| 5434 | int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1; |
| 5435 | int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1; |
| 5436 | int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts; |
| 5437 | int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts; |
| 5438 | |
| 5439 | SDValue Vec0 = SVN->getOperand(VecIdx0); |
| 5440 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
| 5441 | Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32)); |
| 5442 | |
| 5443 | SDValue Vec1 = SVN->getOperand(VecIdx1); |
| 5444 | SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
| 5445 | Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32)); |
| 5446 | Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 })); |
| 5447 | } |
| 5448 | } |
| 5449 | |
| 5450 | return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces); |
| 5451 | } |
| 5452 | |
| 5453 | SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op, |
| 5454 | SelectionDAG &DAG) const { |
| 5455 | SDLoc SL(Op); |
| 5456 | EVT VT = Op.getValueType(); |
| 5457 | |
| 5458 | if (VT == MVT::v4i16 || VT == MVT::v4f16) { |
| 5459 | EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2); |
| 5460 | |
| 5461 | // Turn into pair of packed build_vectors. |
| 5462 | // TODO: Special case for constants that can be materialized with s_mov_b64. |
| 5463 | SDValue Lo = DAG.getBuildVector(HalfVT, SL, |
| 5464 | { Op.getOperand(0), Op.getOperand(1) }); |
| 5465 | SDValue Hi = DAG.getBuildVector(HalfVT, SL, |
| 5466 | { Op.getOperand(2), Op.getOperand(3) }); |
| 5467 | |
| 5468 | SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo); |
| 5469 | SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi); |
| 5470 | |
| 5471 | SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi }); |
| 5472 | return DAG.getNode(ISD::BITCAST, SL, VT, Blend); |
| 5473 | } |
| 5474 | |
| 5475 | assert(VT == MVT::v2f16 || VT == MVT::v2i16); |
| 5476 | assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
| 5477 | |
| 5478 | SDValue Lo = Op.getOperand(0); |
| 5479 | SDValue Hi = Op.getOperand(1); |
| 5480 | |
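|      | // The general case at the bottom of this function packs the halves as
|      | // lo | (hi << 16) in an i32 and bitcasts to the packed type; the undef
|      | // special cases below avoid materializing the unused half.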
| 5481 | // Avoid adding defined bits with the zero_extend. |
| 5482 | if (Hi.isUndef()) { |
| 5483 | Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); |
| 5484 | SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo); |
| 5485 | return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo); |
| 5486 | } |
| 5487 | |
| 5488 | Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi); |
| 5489 | Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi); |
| 5490 | |
| 5491 | SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi, |
| 5492 | DAG.getConstant(16, SL, MVT::i32)); |
| 5493 | if (Lo.isUndef()) |
| 5494 | return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi); |
| 5495 | |
| 5496 | Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); |
| 5497 | Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo); |
| 5498 | |
| 5499 | SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi); |
| 5500 | return DAG.getNode(ISD::BITCAST, SL, VT, Or); |
| 5501 | } |
| 5502 | |
| 5503 | bool |
| 5504 | SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
| 5505 | // We can fold offsets for anything that doesn't require a GOT relocation. |
| 5506 | return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || |
| 5507 | GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || |
| 5508 | GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && |
| 5509 | !shouldEmitGOTReloc(GA->getGlobal()); |
| 5510 | } |
| 5511 | |
| 5512 | static SDValue |
| 5513 | buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, |
| 5514 | const SDLoc &DL, int64_t Offset, EVT PtrVT, |
| 5515 | unsigned GAFlags = SIInstrInfo::MO_NONE) { |
| 5516 | assert(isInt<32>(Offset + 4) && "32-bit offset is expected!");
| 5517 | // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is |
| 5518 | // lowered to the following code sequence: |
| 5519 | // |
| 5520 | // For constant address space: |
| 5521 | // s_getpc_b64 s[0:1] |
| 5522 | // s_add_u32 s0, s0, $symbol |
| 5523 | // s_addc_u32 s1, s1, 0 |
| 5524 | // |
| 5525 | // s_getpc_b64 returns the address of the s_add_u32 instruction and then |
| 5526 | // a fixup or relocation is emitted to replace $symbol with a literal |
| 5527 | // constant, which is a pc-relative offset from the encoding of the $symbol |
| 5528 | // operand to the global variable. |
| 5529 | // |
| 5530 | // For global address space: |
| 5531 | // s_getpc_b64 s[0:1] |
| 5532 | // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo |
| 5533 | // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi |
| 5534 | // |
| 5535 | // s_getpc_b64 returns the address of the s_add_u32 instruction and then |
| 5536 | // fixups or relocations are emitted to replace $symbol@*@lo and |
| 5537 | // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, |
| 5538 | // which is a 64-bit pc-relative offset from the encoding of the $symbol |
| 5539 | // operand to the global variable. |
| 5540 | // |
| 5541 | // What we want here is an offset from the value returned by s_getpc |
| 5542 | // (which is the address of the s_add_u32 instruction) to the global |
| 5543 | // variable, but since the encoding of $symbol starts 4 bytes after the start |
| 5544 | // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too |
| 5545 | // small. This requires us to add 4 to the global variable offset in order to |
| 5546 | // compute the correct address. Similarly for the s_addc_u32 instruction, the |
| 5547 | // encoding of $symbol starts 12 bytes after the start of the s_add_u32 |
| 5548 | // instruction. |
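|      | // As a worked example, with Offset == 0 the lo fixup below is emitted with
|      | // an addend of 4 and the hi fixup with an addend of 12, matching the byte
|      | // distances from the s_getpc_b64 result to the two literal operands.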
| 5549 | SDValue PtrLo = |
| 5550 | DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags); |
| 5551 | SDValue PtrHi; |
| 5552 | if (GAFlags == SIInstrInfo::MO_NONE) { |
| 5553 | PtrHi = DAG.getTargetConstant(0, DL, MVT::i32); |
| 5554 | } else { |
| 5555 | PtrHi = |
| 5556 | DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 12, GAFlags + 1); |
| 5557 | } |
| 5558 | return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); |
| 5559 | } |
| 5560 | |
| 5561 | SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, |
| 5562 | SDValue Op, |
| 5563 | SelectionDAG &DAG) const { |
| 5564 | GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); |
| 5565 | SDLoc DL(GSD); |
| 5566 | EVT PtrVT = Op.getValueType(); |
| 5567 | |
| 5568 | const GlobalValue *GV = GSD->getGlobal(); |
| 5569 | if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && |
| 5570 | shouldUseLDSConstAddress(GV)) || |
| 5571 | GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS || |
| 5572 | GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { |
| 5573 | if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && |
| 5574 | GV->hasExternalLinkage()) { |
| 5575 | Type *Ty = GV->getValueType(); |
| 5576 | // HIP uses an unsized array `extern __shared__ T s[]` or a similar
| 5577 | // zero-sized type in other languages to declare dynamic shared memory
| 5578 | // whose size is not known at compile time. Such arrays are allocated
| 5579 | // by the runtime and placed directly after the statically allocated
| 5580 | // ones. They all share the same offset.
| 5581 | if (DAG.getDataLayout().getTypeAllocSize(Ty).isZero()) { |
| 5582 | assert(PtrVT == MVT::i32 && "32-bit pointer is expected.");
| 5583 | // Adjust alignment for that dynamic shared memory array. |
| 5584 | MFI->setDynLDSAlign(DAG.getDataLayout(), *cast<GlobalVariable>(GV)); |
| 5585 | return SDValue( |
| 5586 | DAG.getMachineNode(AMDGPU::GET_GROUPSTATICSIZE, DL, PtrVT), 0); |
| 5587 | } |
| 5588 | } |
| 5589 | return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); |
| 5590 | } |
| 5591 | |
| 5592 | if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { |
| 5593 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(), |
| 5594 | SIInstrInfo::MO_ABS32_LO); |
| 5595 | return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA); |
| 5596 | } |
| 5597 | |
| 5598 | if (shouldEmitFixup(GV)) |
| 5599 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); |
| 5600 | else if (shouldEmitPCReloc(GV)) |
| 5601 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, |
| 5602 | SIInstrInfo::MO_REL32); |
| 5603 | |
| 5604 | SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, |
| 5605 | SIInstrInfo::MO_GOTPCREL32); |
| 5606 | |
| 5607 | Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); |
| 5608 | PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); |
| 5609 | const DataLayout &DataLayout = DAG.getDataLayout(); |
| 5610 | Align Alignment = DataLayout.getABITypeAlign(PtrTy); |
| 5611 | MachinePointerInfo PtrInfo |
| 5612 | = MachinePointerInfo::getGOT(DAG.getMachineFunction()); |
| 5613 | |
| 5614 | return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Alignment, |
| 5615 | MachineMemOperand::MODereferenceable | |
| 5616 | MachineMemOperand::MOInvariant); |
| 5617 | } |
| 5618 | |
| 5619 | SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, |
| 5620 | const SDLoc &DL, SDValue V) const { |
| 5621 | // We can't use S_MOV_B32 directly, because there is no way to specify m0 as |
| 5622 | // the destination register. |
| 5623 | // |
| 5624 | // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, |
| 5625 | // so we will end up with redundant moves to m0. |
| 5626 | // |
| 5627 | // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. |
| 5628 | |
| 5629 | // A Null SDValue creates a glue result. |
| 5630 | SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, |
| 5631 | V, Chain); |
| 5632 | return SDValue(M0, 0); |
| 5633 | } |
| 5634 | |
| 5635 | SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, |
| 5636 | SDValue Op, |
| 5637 | MVT VT, |
| 5638 | unsigned Offset) const { |
| 5639 | SDLoc SL(Op); |
| 5640 | SDValue Param = lowerKernargMemParameter( |
| 5641 | DAG, MVT::i32, MVT::i32, SL, DAG.getEntryNode(), Offset, Align(4), false); |
| 5642 | // The local size values will have the hi 16-bits as zero. |
| 5643 | return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, |
| 5644 | DAG.getValueType(VT)); |
| 5645 | } |
| 5646 | |
| 5647 | static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, |
| 5648 | EVT VT) { |
| 5649 | DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), |
| 5650 | "non-hsa intrinsic with hsa target",
| 5651 | DL.getDebugLoc()); |
| 5652 | DAG.getContext()->diagnose(BadIntrin); |
| 5653 | return DAG.getUNDEF(VT); |
| 5654 | } |
| 5655 | |
| 5656 | static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, |
| 5657 | EVT VT) { |
| 5658 | DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), |
| 5659 | "intrinsic not supported on subtarget",
| 5660 | DL.getDebugLoc()); |
| 5661 | DAG.getContext()->diagnose(BadIntrin); |
| 5662 | return DAG.getUNDEF(VT); |
| 5663 | } |
| 5664 | |
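|      | // Round the operand list up to the next supported vector width
|      | // (1, 2, 3, 4, 8 or 16 x f32), bitcasting each element to f32 and padding
|      | // the tail with undef; e.g. 5 dwords become a v8f32 whose last three
|      | // lanes are undef.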
| 5665 | static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL, |
| 5666 | ArrayRef<SDValue> Elts) { |
| 5667 | assert(!Elts.empty()); |
| 5668 | MVT Type; |
| 5669 | unsigned NumElts; |
| 5670 | |
| 5671 | if (Elts.size() == 1) { |
| 5672 | Type = MVT::f32; |
| 5673 | NumElts = 1; |
| 5674 | } else if (Elts.size() == 2) { |
| 5675 | Type = MVT::v2f32; |
| 5676 | NumElts = 2; |
| 5677 | } else if (Elts.size() == 3) { |
| 5678 | Type = MVT::v3f32; |
| 5679 | NumElts = 3; |
| 5680 | } else if (Elts.size() <= 4) { |
| 5681 | Type = MVT::v4f32; |
| 5682 | NumElts = 4; |
| 5683 | } else if (Elts.size() <= 8) { |
| 5684 | Type = MVT::v8f32; |
| 5685 | NumElts = 8; |
| 5686 | } else { |
| 5687 | assert(Elts.size() <= 16); |
| 5688 | Type = MVT::v16f32; |
| 5689 | NumElts = 16; |
| 5690 | } |
| 5691 | |
| 5692 | SmallVector<SDValue, 16> VecElts(NumElts); |
| 5693 | for (unsigned i = 0; i < Elts.size(); ++i) { |
| 5694 | SDValue Elt = Elts[i]; |
| 5695 | if (Elt.getValueType() != MVT::f32) |
| 5696 | Elt = DAG.getBitcast(MVT::f32, Elt); |
| 5697 | VecElts[i] = Elt; |
| 5698 | } |
| 5699 | for (unsigned i = Elts.size(); i < NumElts; ++i) |
| 5700 | VecElts[i] = DAG.getUNDEF(MVT::f32); |
| 5701 | |
| 5702 | if (NumElts == 1) |
| 5703 | return VecElts[0]; |
| 5704 | return DAG.getBuildVector(Type, DL, VecElts); |
| 5705 | } |
| 5706 | |
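|      | // Split a constant cachepolicy immediate into glc (bit 0), slc (bit 1)
|      | // and dlc (bit 2) target constants; returns false if a bit is set that
|      | // was not requested or is unknown. E.g. a cachepolicy of 3 yields
|      | // glc = 1 and slc = 1 when both are requested.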
| 5707 | static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG, |
| 5708 | SDValue *GLC, SDValue *SLC, SDValue *DLC) { |
| 5709 | auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode()); |
| 5710 | |
| 5711 | uint64_t Value = CachePolicyConst->getZExtValue(); |
| 5712 | SDLoc DL(CachePolicy); |
| 5713 | if (GLC) { |
| 5714 | *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); |
| 5715 | Value &= ~(uint64_t)0x1; |
| 5716 | } |
| 5717 | if (SLC) { |
| 5718 | *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32); |
| 5719 | Value &= ~(uint64_t)0x2; |
| 5720 | } |
| 5721 | if (DLC) { |
| 5722 | *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32); |
| 5723 | Value &= ~(uint64_t)0x4; |
| 5724 | } |
| 5725 | |
| 5726 | return Value == 0; |
| 5727 | } |
| 5728 | |
| 5729 | static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT, |
| 5730 | SDValue Src, int ExtraElts) {
| 5731 | EVT SrcVT = Src.getValueType(); |
| 5732 | |
| 5733 | SmallVector<SDValue, 8> Elts; |
| 5734 | |
| 5735 | if (SrcVT.isVector()) |
| 5736 | DAG.ExtractVectorElements(Src, Elts); |
| 5737 | else |
| 5738 | Elts.push_back(Src); |
| 5739 | |
| 5740 | SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType()); |
| 5741 | while (ExtraElts--) |
| 5742 | Elts.push_back(Undef); |
| 5743 | |
| 5744 | return DAG.getBuildVector(CastVT, DL, Elts); |
| 5745 | } |
| 5746 | |
| 5747 | // Re-construct the required return value for an image load intrinsic.
| 5748 | // This is more complicated due to the optional use of TexFailCtrl, which
| 5749 | // means the required return type is an aggregate.
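|      | // An illustrative example: a packed d16 load with a dmask popcount of 3
|      | // and TFE enabled yields (3 + 1) / 2 == 2 data dwords plus one status
|      | // dword from the instruction; the data dwords are extracted back out,
|      | // converted to the requested half type, and merged with the status value
|      | // and the chain.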
| 5750 | static SDValue constructRetValue(SelectionDAG &DAG, |
| 5751 | MachineSDNode *Result, |
| 5752 | ArrayRef<EVT> ResultTypes, |
| 5753 | bool IsTexFail, bool Unpacked, bool IsD16, |
| 5754 | int DMaskPop, int NumVDataDwords, |
| 5755 | const SDLoc &DL, LLVMContext &Context) { |
| 5756 | // Determine the required return type. This is the same regardless of IsTexFail flag |
| 5757 | EVT ReqRetVT = ResultTypes[0]; |
| 5758 | int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1; |
| 5759 | int NumDataDwords = (!IsD16 || (IsD16 && Unpacked)) ? |
| 5760 | ReqRetNumElts : (ReqRetNumElts + 1) / 2; |
| 5761 | |
| 5762 | int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ? |
| 5763 | DMaskPop : (DMaskPop + 1) / 2; |
| 5764 | |
| 5765 | MVT DataDwordVT = NumDataDwords == 1 ? |
| 5766 | MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords); |
| 5767 | |
| 5768 | MVT MaskPopVT = MaskPopDwords == 1 ? |
| 5769 | MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords); |
| 5770 | |
| 5771 | SDValue Data(Result, 0); |
| 5772 | SDValue TexFail; |
| 5773 | |
| 5774 | if (DMaskPop > 0 && Data.getValueType() != MaskPopVT) { |
| 5775 | SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32); |
| 5776 | if (MaskPopVT.isVector()) { |
| 5777 | Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT, |
| 5778 | SDValue(Result, 0), ZeroIdx); |
| 5779 | } else { |
| 5780 | Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT, |
| 5781 | SDValue(Result, 0), ZeroIdx); |
| 5782 | } |
| 5783 | } |
| 5784 | |
| 5785 | if (DataDwordVT.isVector()) |
| 5786 | Data = padEltsToUndef(DAG, DL, DataDwordVT, Data, |
| 5787 | NumDataDwords - MaskPopDwords); |
| 5788 | |
| 5789 | if (IsD16) |
| 5790 | Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked); |
| 5791 | |
| 5792 | EVT LegalReqRetVT = ReqRetVT; |
| 5793 | if (!ReqRetVT.isVector()) { |
| 5794 | Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data); |
| 5795 | } else { |
| 5796 | // We need to widen the return vector to a legal type |
| 5797 | if ((ReqRetVT.getVectorNumElements() % 2) == 1 && |
| 5798 | ReqRetVT.getVectorElementType().getSizeInBits() == 16) { |
| 5799 | LegalReqRetVT = |
| 5800 | EVT::getVectorVT(*DAG.getContext(), ReqRetVT.getVectorElementType(), |
| 5801 | ReqRetVT.getVectorNumElements() + 1); |
| 5802 | } |
| 5803 | } |
| 5804 | Data = DAG.getNode(ISD::BITCAST, DL, LegalReqRetVT, Data); |
| 5805 | |
| 5806 | if (IsTexFail) { |
| 5807 | TexFail = |
| 5808 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, SDValue(Result, 0), |
| 5809 | DAG.getConstant(MaskPopDwords, DL, MVT::i32)); |
| 5810 | |
| 5811 | return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL); |
| 5812 | } |
| 5813 | |
| 5814 | if (Result->getNumValues() == 1) |
| 5815 | return Data; |
| 5816 | |
| 5817 | return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL); |
| 5818 | } |
| 5819 | |
| 5820 | static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE, |
| 5821 | SDValue *LWE, bool &IsTexFail) { |
| 5822 | auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode()); |
| 5823 | |
| 5824 | uint64_t Value = TexFailCtrlConst->getZExtValue(); |
| 5825 | if (Value) { |
| 5826 | IsTexFail = true; |
| 5827 | } |
| 5828 | |
| 5829 | SDLoc DL(TexFailCtrlConst); |
| 5830 | *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); |
| 5831 | Value &= ~(uint64_t)0x1; |
| 5832 | *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32); |
| 5833 | Value &= ~(uint64_t)0x2; |
| 5834 | |
| 5835 | return Value == 0; |
| 5836 | } |
| 5837 | |
| 5838 | static void packImageA16AddressToDwords(SelectionDAG &DAG, SDValue Op, |
| 5839 | MVT PackVectorVT, |
| 5840 | SmallVectorImpl<SDValue> &PackedAddrs, |
| 5841 | unsigned DimIdx, unsigned EndIdx, |
| 5842 | unsigned NumGradients) { |
| 5843 | SDLoc DL(Op); |
| 5844 | for (unsigned I = DimIdx; I < EndIdx; I++) { |
| 5845 | SDValue Addr = Op.getOperand(I); |
| 5846 | |
| 5847 | // Gradients are packed with undef for each coordinate. |
| 5848 | // In <hi 16 bit>,<lo 16 bit> notation, the registers look like this: |
| 5849 | // 1D: undef,dx/dh; undef,dx/dv |
| 5850 | // 2D: dy/dh,dx/dh; dy/dv,dx/dv |
| 5851 | // 3D: dy/dh,dx/dh; undef,dz/dh; dy/dv,dx/dv; undef,dz/dv |
| 5852 | if (((I + 1) >= EndIdx) || |
| 5853 | ((NumGradients / 2) % 2 == 1 && (I == DimIdx + (NumGradients / 2) - 1 || |
| 5854 | I == DimIdx + NumGradients - 1))) { |
| 5855 | if (Addr.getValueType() != MVT::i16) |
| 5856 | Addr = DAG.getBitcast(MVT::i16, Addr); |
| 5857 | Addr = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Addr); |
| 5858 | } else { |
| 5859 | Addr = DAG.getBuildVector(PackVectorVT, DL, {Addr, Op.getOperand(I + 1)}); |
| 5860 | I++; |
| 5861 | } |
| 5862 | Addr = DAG.getBitcast(MVT::f32, Addr); |
| 5863 | PackedAddrs.push_back(Addr); |
| 5864 | } |
| 5865 | } |
| 5866 | |
| 5867 | SDValue SITargetLowering::lowerImage(SDValue Op, |
| 5868 | const AMDGPU::ImageDimIntrinsicInfo *Intr, |
| 5869 | SelectionDAG &DAG, bool WithChain) const { |
| 5870 | SDLoc DL(Op); |
| 5871 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5872 | const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>(); |
| 5873 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = |
| 5874 | AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); |
| 5875 | const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); |
| 5876 | const AMDGPU::MIMGLZMappingInfo *LZMappingInfo = |
| 5877 | AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode); |
| 5878 | const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo = |
| 5879 | AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode); |
| 5880 | unsigned IntrOpcode = Intr->BaseOpcode; |
| 5881 | bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget); |
| 5882 | |
| 5883 | SmallVector<EVT, 3> ResultTypes(Op->values()); |
| 5884 | SmallVector<EVT, 3> OrigResultTypes(Op->values()); |
| 5885 | bool IsD16 = false; |
| 5886 | bool IsG16 = false; |
| 5887 | bool IsA16 = false; |
| 5888 | SDValue VData; |
| 5889 | int NumVDataDwords; |
| 5890 | bool AdjustRetType = false; |
| 5891 | |
| 5892 | // Offset of intrinsic arguments |
| 5893 | const unsigned ArgOffset = WithChain ? 2 : 1; |
| 5894 | |
| 5895 | unsigned DMask; |
| 5896 | unsigned DMaskLanes = 0; |
| 5897 | |
| 5898 | if (BaseOpcode->Atomic) { |
| 5899 | VData = Op.getOperand(2); |
| 5900 | |
| 5901 | bool Is64Bit = VData.getValueType() == MVT::i64; |
| 5902 | if (BaseOpcode->AtomicX2) { |
| 5903 | SDValue VData2 = Op.getOperand(3); |
| 5904 | VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL, |
| 5905 | {VData, VData2}); |
| 5906 | if (Is64Bit) |
| 5907 | VData = DAG.getBitcast(MVT::v4i32, VData); |
| 5908 | |
| 5909 | ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; |
| 5910 | DMask = Is64Bit ? 0xf : 0x3; |
| 5911 | NumVDataDwords = Is64Bit ? 4 : 2; |
| 5912 | } else { |
| 5913 | DMask = Is64Bit ? 0x3 : 0x1; |
| 5914 | NumVDataDwords = Is64Bit ? 2 : 1; |
| 5915 | } |
| 5916 | } else { |
| 5917 | auto *DMaskConst = |
| 5918 | cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->DMaskIndex)); |
| 5919 | DMask = DMaskConst->getZExtValue(); |
| 5920 | DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); |
| 5921 | |
| 5922 | if (BaseOpcode->Store) { |
| 5923 | VData = Op.getOperand(2); |
| 5924 | |
| 5925 | MVT StoreVT = VData.getSimpleValueType(); |
| 5926 | if (StoreVT.getScalarType() == MVT::f16) { |
| 5927 | if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) |
| 5928 | return Op; // D16 is unsupported for this instruction |
| 5929 | |
| 5930 | IsD16 = true; |
| 5931 | VData = handleD16VData(VData, DAG, true); |
| 5932 | } |
| 5933 | |
| 5934 | NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32; |
| 5935 | } else { |
| 5936 | // Work out the number of dwords based on the dmask popcount, the
| 5937 | // underlying type, and whether packing is supported.
| 5938 | MVT LoadVT = ResultTypes[0].getSimpleVT(); |
| 5939 | if (LoadVT.getScalarType() == MVT::f16) { |
| 5940 | if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) |
| 5941 | return Op; // D16 is unsupported for this instruction |
| 5942 | |
| 5943 | IsD16 = true; |
| 5944 | } |
| 5945 | |
| 5946 | // Confirm that the return type is large enough for the dmask specified |
| 5947 | if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) || |
| 5948 | (!LoadVT.isVector() && DMaskLanes > 1)) |
| 5949 | return Op; |
| 5950 | |
| 5951 | // The sq blocks of gfx8 and gfx9 do not estimate register use correctly
| 5952 | // for d16 image_gather4, image_gather4_l, and image_gather4_lz
| 5953 | // instructions.
| 5954 | if (IsD16 && !Subtarget->hasUnpackedD16VMem() && |
| 5955 | !(BaseOpcode->Gather4 && Subtarget->hasImageGather4D16Bug())) |
| 5956 | NumVDataDwords = (DMaskLanes + 1) / 2; |
| 5957 | else |
| 5958 | NumVDataDwords = DMaskLanes; |
| 5959 | |
| 5960 | AdjustRetType = true; |
| 5961 | } |
| 5962 | } |
| 5963 | |
| 5964 | unsigned VAddrEnd = ArgOffset + Intr->VAddrEnd; |
| 5965 | SmallVector<SDValue, 4> VAddrs; |
| 5966 | |
| 5967 | // Optimize _L to _LZ when the constant 'lod' argument is zero or negative
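|      | // (e.g., as an illustration, an image_sample_l with lod == 0 is emitted
|      | // as image_sample_lz with the lod vaddr operand removed).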
| 5968 | if (LZMappingInfo) { |
| 5969 | if (auto *ConstantLod = dyn_cast<ConstantFPSDNode>( |
| 5970 | Op.getOperand(ArgOffset + Intr->LodIndex))) { |
| 5971 | if (ConstantLod->isZero() || ConstantLod->isNegative()) { |
| 5972 | IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l |
| 5973 | VAddrEnd--; // remove 'lod' |
| 5974 | } |
| 5975 | } |
| 5976 | } |
| 5977 | |
| 5978 | // Optimize _mip away when 'lod' is zero
| 5979 | if (MIPMappingInfo) { |
| 5980 | if (auto *ConstantLod = dyn_cast<ConstantSDNode>( |
| 5981 | Op.getOperand(ArgOffset + Intr->MipIndex))) { |
| 5982 | if (ConstantLod->isNullValue()) { |
| 5983 | IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip |
| 5984 | VAddrEnd--; // remove 'mip' |
| 5985 | } |
| 5986 | } |
| 5987 | } |
| 5988 | |
| 5989 | // Push back extra arguments. |
| 5990 | for (unsigned I = Intr->VAddrStart; I < Intr->GradientStart; I++) |
| 5991 | VAddrs.push_back(Op.getOperand(ArgOffset + I)); |
| 5992 | |
| 5993 | // Check for 16 bit addresses or derivatives and pack if true. |
| 5994 | MVT VAddrVT = |
| 5995 | Op.getOperand(ArgOffset + Intr->GradientStart).getSimpleValueType(); |
| 5996 | MVT VAddrScalarVT = VAddrVT.getScalarType(); |
| 5997 | MVT PackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; |
| 5998 | IsG16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16; |
| 5999 | |
| 6000 | VAddrVT = Op.getOperand(ArgOffset + Intr->CoordStart).getSimpleValueType(); |
| 6001 | VAddrScalarVT = VAddrVT.getScalarType(); |
| 6002 | IsA16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16; |
| 6003 | if (IsA16 || IsG16) { |
| 6004 | if (IsA16) { |
| 6005 | if (!ST->hasA16()) { |
| 6006 | LLVM_DEBUG(dbgs() << "Failed to lower image intrinsic: Target does not " |
| 6007 | "support 16 bit addresses\n");
| 6008 | return Op; |
| 6009 | } |
| 6010 | if (!IsG16) { |
| 6011 | LLVM_DEBUG( |
| 6012 | dbgs() << "Failed to lower image intrinsic: 16 bit addresses " |
| 6013 | "need 16 bit derivatives but got 32 bit derivatives\n");
| 6014 | return Op; |
| 6015 | } |
| 6016 | } else if (!ST->hasG16()) { |
| 6017 | LLVM_DEBUG(dbgs() << "Failed to lower image intrinsic: Target does not " |
| 6018 | "support 16 bit derivatives\n");
| 6019 | return Op; |
| 6020 | } |
| 6021 | |
| 6022 | if (BaseOpcode->Gradients && !IsA16) { |
| 6023 | if (!ST->hasG16()) { |
| 6024 | LLVM_DEBUG(dbgs() << "Failed to lower image intrinsic: Target does not " |
| 6025 | "support 16 bit derivatives\n");
| 6026 | return Op; |
| 6027 | } |
| 6028 | // Activate g16 |
| 6029 | const AMDGPU::MIMGG16MappingInfo *G16MappingInfo = |
| 6030 | AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode); |
| 6031 | IntrOpcode = G16MappingInfo->G16; // set new opcode to variant with _g16 |
| 6032 | } |
| 6033 | |
| 6034 | // Don't compress addresses for G16 |
| 6035 | const int PackEndIdx = IsA16 ? VAddrEnd : (ArgOffset + Intr->CoordStart); |
| 6036 | packImageA16AddressToDwords(DAG, Op, PackVectorVT, VAddrs, |
| 6037 | ArgOffset + Intr->GradientStart, PackEndIdx, |
| 6038 | Intr->NumGradients); |
| 6039 | |
| 6040 | if (!IsA16) { |
| 6041 | // Add uncompressed address |
| 6042 | for (unsigned I = ArgOffset + Intr->CoordStart; I < VAddrEnd; I++) |
| 6043 | VAddrs.push_back(Op.getOperand(I)); |
| 6044 | } |
| 6045 | } else { |
| 6046 | for (unsigned I = ArgOffset + Intr->GradientStart; I < VAddrEnd; I++) |
| 6047 | VAddrs.push_back(Op.getOperand(I)); |
| 6048 | } |
| 6049 | |
| 6050 | // If the register allocator cannot place the address registers contiguously |
| 6051 | // without introducing moves, then using the non-sequential address encoding |
| 6052 | // is always preferable, since it saves VALU instructions and is usually a |
| 6053 | // wash in terms of code size or even better. |
| 6054 | // |
| 6055 | // However, we currently have no way of hinting to the register allocator that |
| 6056 | // MIMG addresses should be placed contiguously when it is possible to do so, |
| 6057 | // so force non-NSA for the common 2-address case as a heuristic. |
| 6058 | // |
| 6059 | // SIShrinkInstructions will convert NSA encodings to non-NSA after register |
| 6060 | // allocation when possible. |
| 6061 | bool UseNSA = |
| 6062 | ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3; |
| 6063 | SDValue VAddr; |
| 6064 | if (!UseNSA) |
| 6065 | VAddr = getBuildDwordsVector(DAG, DL, VAddrs); |
| 6066 | |
| 6067 | SDValue True = DAG.getTargetConstant(1, DL, MVT::i1); |
| 6068 | SDValue False = DAG.getTargetConstant(0, DL, MVT::i1); |
| 6069 | SDValue Unorm; |
| 6070 | if (!BaseOpcode->Sampler) { |
| 6071 | Unorm = True; |
| 6072 | } else { |
| 6073 | auto UnormConst = |
| 6074 | cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->UnormIndex)); |
| 6075 | |
| 6076 | Unorm = UnormConst->getZExtValue() ? True : False; |
| 6077 | } |
| 6078 | |
| 6079 | SDValue TFE; |
| 6080 | SDValue LWE; |
| 6081 | SDValue TexFail = Op.getOperand(ArgOffset + Intr->TexFailCtrlIndex); |
| 6082 | bool IsTexFail = false; |
| 6083 | if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail)) |
| 6084 | return Op; |
| 6085 | |
| 6086 | if (IsTexFail) { |
| 6087 | if (!DMaskLanes) { |
| 6088 | // Expecting to get an error flag since TFC is on and dmask is 0.
| 6089 | // Force dmask to be at least 1, otherwise the instruction will fail.
| 6090 | DMask = 0x1; |
| 6091 | DMaskLanes = 1; |
| 6092 | NumVDataDwords = 1; |
| 6093 | } |
| 6094 | NumVDataDwords += 1; |
| 6095 | AdjustRetType = true; |
| 6096 | } |
| 6097 | |
| 6098 | // Check whether something earlier tagged the return type as needing
| 6099 | // adjustment (the instruction is a load or has set TexFailCtrl flags).
| 6100 | if (AdjustRetType) { |
| 6101 | // NumVDataDwords reflects the true number of dwords required in the return type |
| 6102 | if (DMaskLanes == 0 && !BaseOpcode->Store) { |
| 6103 | // This is a no-op load. This can be eliminated |
| 6104 | SDValue Undef = DAG.getUNDEF(Op.getValueType()); |
| 6105 | if (isa<MemSDNode>(Op)) |
| 6106 | return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL); |
| 6107 | return Undef; |
| 6108 | } |
| 6109 | |
| 6110 | EVT NewVT = NumVDataDwords > 1 ? |
| 6111 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords) |
| 6112 | : MVT::i32; |
| 6113 | |
| 6114 | ResultTypes[0] = NewVT; |
| 6115 | if (ResultTypes.size() == 3) { |
| 6116 | // The original result was an aggregate type used for TexFailCtrl results.
| 6117 | // The actual instruction returns as a vector type, which has now been
| 6118 | // created. Remove the aggregate result.
| 6119 | ResultTypes.erase(&ResultTypes[1]); |
| 6120 | } |
| 6121 | } |
| 6122 | |
| 6123 | SDValue GLC; |
| 6124 | SDValue SLC; |
| 6125 | SDValue DLC; |
| 6126 | if (BaseOpcode->Atomic) { |
| 6127 | GLC = True; // TODO no-return optimization |
| 6128 | if (!parseCachePolicy(Op.getOperand(ArgOffset + Intr->CachePolicyIndex), |
| 6129 | DAG, nullptr, &SLC, IsGFX10Plus ? &DLC : nullptr)) |
| 6130 | return Op; |
| 6131 | } else { |
| 6132 | if (!parseCachePolicy(Op.getOperand(ArgOffset + Intr->CachePolicyIndex), |
| 6133 | DAG, &GLC, &SLC, IsGFX10Plus ? &DLC : nullptr)) |
| 6134 | return Op; |
| 6135 | } |
| 6136 | |
| 6137 | SmallVector<SDValue, 26> Ops; |
| 6138 | if (BaseOpcode->Store || BaseOpcode->Atomic) |
| 6139 | Ops.push_back(VData); // vdata |
| 6140 | if (UseNSA) |
| 6141 | append_range(Ops, VAddrs); |
| 6142 | else |
| 6143 | Ops.push_back(VAddr); |
| 6144 | Ops.push_back(Op.getOperand(ArgOffset + Intr->RsrcIndex)); |
| 6145 | if (BaseOpcode->Sampler) |
| 6146 | Ops.push_back(Op.getOperand(ArgOffset + Intr->SampIndex)); |
| 6147 | Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32)); |
| 6148 | if (IsGFX10Plus) |
| 6149 | Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32)); |
| 6150 | Ops.push_back(Unorm); |
| 6151 | if (IsGFX10Plus) |
| 6152 | Ops.push_back(DLC); |
| 6153 | Ops.push_back(GLC); |
| 6154 | Ops.push_back(SLC); |
| 6155 | Ops.push_back(IsA16 && // r128, a16 for gfx9 |
| 6156 | ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False); |
| 6157 | if (IsGFX10Plus) |
| 6158 | Ops.push_back(IsA16 ? True : False); |
| 6159 | Ops.push_back(TFE); |
| 6160 | Ops.push_back(LWE); |
| 6161 | if (!IsGFX10Plus) |
| 6162 | Ops.push_back(DimInfo->DA ? True : False); |
| 6163 | if (BaseOpcode->HasD16) |
| 6164 | Ops.push_back(IsD16 ? True : False); |
| 6165 | if (isa<MemSDNode>(Op)) |
| 6166 | Ops.push_back(Op.getOperand(0)); // chain |
| 6167 | |
| 6168 | int NumVAddrDwords = |
| 6169 | UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32; |
| 6170 | int Opcode = -1; |
| 6171 | |
| 6172 | if (IsGFX10Plus) { |
| 6173 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, |
| 6174 | UseNSA ? AMDGPU::MIMGEncGfx10NSA |
| 6175 | : AMDGPU::MIMGEncGfx10Default, |
| 6176 | NumVDataDwords, NumVAddrDwords); |
| 6177 | } else { |
| 6178 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
| 6179 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, |
| 6180 | NumVDataDwords, NumVAddrDwords); |
| 6181 | if (Opcode == -1) |
| 6182 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, |
| 6183 | NumVDataDwords, NumVAddrDwords); |
| 6184 | } |
| 6185 | assert(Opcode != -1); |
| 6186 | |
| 6187 | MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops); |
| 6188 | if (auto MemOp = dyn_cast<MemSDNode>(Op)) { |
| 6189 | MachineMemOperand *MemRef = MemOp->getMemOperand(); |
| 6190 | DAG.setNodeMemRefs(NewNode, {MemRef}); |
| 6191 | } |
| 6192 | |
| 6193 | if (BaseOpcode->AtomicX2) { |
| 6194 | SmallVector<SDValue, 1> Elt; |
| 6195 | DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1); |
| 6196 | return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL); |
| 6197 | } else if (!BaseOpcode->Store) { |
| 6198 | return constructRetValue(DAG, NewNode, |
| 6199 | OrigResultTypes, IsTexFail, |
| 6200 | Subtarget->hasUnpackedD16VMem(), IsD16, |
| 6201 | DMaskLanes, NumVDataDwords, DL, |
| 6202 | *DAG.getContext()); |
| 6203 | } |
| 6204 | |
| 6205 | return SDValue(NewNode, 0); |
| 6206 | } |
| 6207 | |
| 6208 | SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, |
| 6209 | SDValue Offset, SDValue CachePolicy, |
| 6210 | SelectionDAG &DAG) const { |
| 6211 | MachineFunction &MF = DAG.getMachineFunction(); |
| 6212 | |
| 6213 | const DataLayout &DataLayout = DAG.getDataLayout(); |
| 6214 | Align Alignment = |
| 6215 | DataLayout.getABITypeAlign(VT.getTypeForEVT(*DAG.getContext())); |
| 6216 | |
| 6217 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
| 6218 | MachinePointerInfo(), |
| 6219 | MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | |
| 6220 | MachineMemOperand::MOInvariant, |
| 6221 | VT.getStoreSize(), Alignment); |
| 6222 | |
| 6223 | if (!Offset->isDivergent()) { |
| 6224 | SDValue Ops[] = { |
| 6225 | Rsrc, |
| 6226 | Offset, // Offset |
| 6227 | CachePolicy |
| 6228 | }; |
| 6229 | |
| 6230 | // Widen vec3 load to vec4. |
| 6231 | if (VT.isVector() && VT.getVectorNumElements() == 3) { |
| 6232 | EVT WidenedVT = |
| 6233 | EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4); |
| 6234 | auto WidenedOp = DAG.getMemIntrinsicNode( |
| 6235 | AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT, |
| 6236 | MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize())); |
| 6237 | auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp, |
| 6238 | DAG.getVectorIdxConstant(0, DL)); |
| 6239 | return Subvector; |
| 6240 | } |
| 6241 | |
| 6242 | return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL, |
| 6243 | DAG.getVTList(VT), Ops, VT, MMO); |
| 6244 | } |
| 6245 | |
| 6246 | // We have a divergent offset. Emit a MUBUF buffer load instead. We can |
| 6247 | // assume that the buffer is unswizzled. |
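|      | // E.g., as an illustration, a divergent v8f32 load is split below into
|      | // two 16-byte loads at InstOffset and InstOffset + 16 and then recombined
|      | // with concat_vectors.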
| 6248 | SmallVector<SDValue, 4> Loads; |
| 6249 | unsigned NumLoads = 1; |
| 6250 | MVT LoadVT = VT.getSimpleVT(); |
| 6251 | unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1; |
| 6252 | assert((LoadVT.getScalarType() == MVT::i32 || |
| 6253 | LoadVT.getScalarType() == MVT::f32)); |
| 6254 | |
| 6255 | if (NumElts == 8 || NumElts == 16) { |
| 6256 | NumLoads = NumElts / 4; |
| 6257 | LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4); |
| 6258 | } |
| 6259 | |
| 6260 | SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue}); |
| 6261 | SDValue Ops[] = { |
| 6262 | DAG.getEntryNode(), // Chain |
| 6263 | Rsrc, // rsrc |
| 6264 | DAG.getConstant(0, DL, MVT::i32), // vindex |
| 6265 | {}, // voffset |
| 6266 | {}, // soffset |
| 6267 | {}, // offset |
| 6268 | CachePolicy, // cachepolicy |
| 6269 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
| 6270 | }; |
| 6271 | |
| 6272 | // Use the alignment to ensure that the required offsets will fit into the |
| 6273 | // immediate offsets. |
| 6274 | setBufferOffsets(Offset, DAG, &Ops[3], |
| 6275 | NumLoads > 1 ? Align(16 * NumLoads) : Align(4)); |
| 6276 | |
| 6277 | uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue(); |
| 6278 | for (unsigned i = 0; i < NumLoads; ++i) { |
| 6279 | Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32); |
| 6280 | Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops, |
| 6281 | LoadVT, MMO, DAG)); |
| 6282 | } |
| 6283 | |
| 6284 | if (NumElts == 8 || NumElts == 16) |
| 6285 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads); |
| 6286 | |
| 6287 | return Loads[0]; |
| 6288 | } |
| 6289 | |
| 6290 | SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, |
| 6291 | SelectionDAG &DAG) const { |
| 6292 | MachineFunction &MF = DAG.getMachineFunction(); |
| 6293 | auto MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 6294 | |
| 6295 | EVT VT = Op.getValueType(); |
| 6296 | SDLoc DL(Op); |
| 6297 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 6298 | |
| 6299 | // TODO: Should this propagate fast-math-flags? |
| 6300 | |
| 6301 | switch (IntrinsicID) { |
| 6302 | case Intrinsic::amdgcn_implicit_buffer_ptr: { |
| 6303 | if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction())) |
| 6304 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6305 | return getPreloadedValue(DAG, *MFI, VT, |
| 6306 | AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); |
| 6307 | } |
| 6308 | case Intrinsic::amdgcn_dispatch_ptr: |
| 6309 | case Intrinsic::amdgcn_queue_ptr: { |
| 6310 | if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) { |
| 6311 | DiagnosticInfoUnsupported BadIntrin( |
| 6312 | MF.getFunction(), "unsupported hsa intrinsic without hsa target",
| 6313 | DL.getDebugLoc()); |
| 6314 | DAG.getContext()->diagnose(BadIntrin); |
| 6315 | return DAG.getUNDEF(VT); |
| 6316 | } |
| 6317 | |
| 6318 | auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? |
| 6319 | AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; |
| 6320 | return getPreloadedValue(DAG, *MFI, VT, RegID); |
| 6321 | } |
| 6322 | case Intrinsic::amdgcn_implicitarg_ptr: { |
| 6323 | if (MFI->isEntryFunction()) |
| 6324 | return getImplicitArgPtr(DAG, DL); |
| 6325 | return getPreloadedValue(DAG, *MFI, VT, |
| 6326 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); |
| 6327 | } |
| 6328 | case Intrinsic::amdgcn_kernarg_segment_ptr: { |
| 6329 | if (!AMDGPU::isKernel(MF.getFunction().getCallingConv())) { |
| 6330 | // This only makes sense to call in a kernel, so just lower to null. |
| 6331 | return DAG.getConstant(0, DL, VT); |
| 6332 | } |
| 6333 | |
| 6334 | return getPreloadedValue(DAG, *MFI, VT, |
| 6335 | AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); |
| 6336 | } |
| 6337 | case Intrinsic::amdgcn_dispatch_id: { |
| 6338 | return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); |
| 6339 | } |
| 6340 | case Intrinsic::amdgcn_rcp: |
| 6341 | return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); |
| 6342 | case Intrinsic::amdgcn_rsq: |
| 6343 | return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); |
| 6344 | case Intrinsic::amdgcn_rsq_legacy: |
| 6345 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
| 6346 | return emitRemovedIntrinsicError(DAG, DL, VT); |
| 6347 | return SDValue(); |
| 6348 | case Intrinsic::amdgcn_rcp_legacy: |
| 6349 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
| 6350 | return emitRemovedIntrinsicError(DAG, DL, VT); |
| 6351 | return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); |
| 6352 | case Intrinsic::amdgcn_rsq_clamp: { |
| 6353 | if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) |
| 6354 | return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); |
| 6355 | |
| 6356 | Type *Type = VT.getTypeForEVT(*DAG.getContext()); |
| 6357 | APFloat Max = APFloat::getLargest(Type->getFltSemantics()); |
| 6358 | APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); |
| 6359 | |
| 6360 | SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); |
| 6361 | SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, |
| 6362 | DAG.getConstantFP(Max, DL, VT)); |
| 6363 | return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, |
| 6364 | DAG.getConstantFP(Min, DL, VT)); |
| 6365 | } |
| 6366 | case Intrinsic::r600_read_ngroups_x: |
| 6367 | if (Subtarget->isAmdHsaOS()) |
| 6368 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6369 | |
| 6370 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 6371 | SI::KernelInputOffsets::NGROUPS_X, Align(4), |
| 6372 | false); |
| 6373 | case Intrinsic::r600_read_ngroups_y: |
| 6374 | if (Subtarget->isAmdHsaOS()) |
| 6375 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6376 | |
| 6377 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 6378 | SI::KernelInputOffsets::NGROUPS_Y, Align(4), |
| 6379 | false); |
| 6380 | case Intrinsic::r600_read_ngroups_z: |
| 6381 | if (Subtarget->isAmdHsaOS()) |
| 6382 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6383 | |
| 6384 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 6385 | SI::KernelInputOffsets::NGROUPS_Z, Align(4), |
| 6386 | false); |
| 6387 | case Intrinsic::r600_read_global_size_x: |
| 6388 | if (Subtarget->isAmdHsaOS()) |
| 6389 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6390 | |
| 6391 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 6392 | SI::KernelInputOffsets::GLOBAL_SIZE_X, |
| 6393 | Align(4), false); |
| 6394 | case Intrinsic::r600_read_global_size_y: |
| 6395 | if (Subtarget->isAmdHsaOS()) |
| 6396 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6397 | |
| 6398 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 6399 | SI::KernelInputOffsets::GLOBAL_SIZE_Y, |
| 6400 | Align(4), false); |
| 6401 | case Intrinsic::r600_read_global_size_z: |
| 6402 | if (Subtarget->isAmdHsaOS()) |
| 6403 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6404 | |
| 6405 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 6406 | SI::KernelInputOffsets::GLOBAL_SIZE_Z, |
| 6407 | Align(4), false); |
| 6408 | case Intrinsic::r600_read_local_size_x: |
| 6409 | if (Subtarget->isAmdHsaOS()) |
| 6410 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6411 | |
| 6412 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
| 6413 | SI::KernelInputOffsets::LOCAL_SIZE_X); |
| 6414 | case Intrinsic::r600_read_local_size_y: |
| 6415 | if (Subtarget->isAmdHsaOS()) |
| 6416 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6417 | |
| 6418 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
| 6419 | SI::KernelInputOffsets::LOCAL_SIZE_Y); |
| 6420 | case Intrinsic::r600_read_local_size_z: |
| 6421 | if (Subtarget->isAmdHsaOS()) |
| 6422 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
| 6423 | |
| 6424 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
| 6425 | SI::KernelInputOffsets::LOCAL_SIZE_Z); |
| 6426 | case Intrinsic::amdgcn_workgroup_id_x: |
| 6427 | return getPreloadedValue(DAG, *MFI, VT, |
| 6428 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X); |
| 6429 | case Intrinsic::amdgcn_workgroup_id_y: |
| 6430 | return getPreloadedValue(DAG, *MFI, VT, |
| 6431 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); |
| 6432 | case Intrinsic::amdgcn_workgroup_id_z: |
| 6433 | return getPreloadedValue(DAG, *MFI, VT, |
| 6434 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); |
| 6435 | case Intrinsic::amdgcn_workitem_id_x: |
| 6436 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, |
| 6437 | SDLoc(DAG.getEntryNode()), |
| 6438 | MFI->getArgInfo().WorkItemIDX); |
| 6439 | case Intrinsic::amdgcn_workitem_id_y: |
| 6440 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, |
| 6441 | SDLoc(DAG.getEntryNode()), |
| 6442 | MFI->getArgInfo().WorkItemIDY); |
| 6443 | case Intrinsic::amdgcn_workitem_id_z: |
| 6444 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, |
| 6445 | SDLoc(DAG.getEntryNode()), |
| 6446 | MFI->getArgInfo().WorkItemIDZ); |
| 6447 | case Intrinsic::amdgcn_wavefrontsize: |
| 6448 | return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(), |
| 6449 | SDLoc(Op), MVT::i32); |
| 6450 | case Intrinsic::amdgcn_s_buffer_load: { |
| 6451 | bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget); |
| 6452 | SDValue GLC; |
| 6453 | SDValue DLC = DAG.getTargetConstant(0, DL, MVT::i1); |
| 6454 | if (!parseCachePolicy(Op.getOperand(3), DAG, &GLC, nullptr, |
| 6455 | IsGFX10Plus ? &DLC : nullptr)) |
| 6456 | return Op; |
| 6457 | return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
| 6458 | DAG); |
| 6459 | } |
| 6460 | case Intrinsic::amdgcn_fdiv_fast: |
| 6461 | return lowerFDIV_FAST(Op, DAG); |
| 6462 | case Intrinsic::amdgcn_sin: |
| 6463 | return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); |
| 6464 | |
| 6465 | case Intrinsic::amdgcn_cos: |
| 6466 | return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); |
| 6467 | |
| 6468 | case Intrinsic::amdgcn_mul_u24: |
| 6469 | return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2)); |
| 6470 | case Intrinsic::amdgcn_mul_i24: |
| 6471 | return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2)); |
| 6472 | |
| 6473 | case Intrinsic::amdgcn_log_clamp: { |
| 6474 | if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) |
| 6475 | return SDValue(); |
| 6476 | |
| 6477 | return emitRemovedIntrinsicError(DAG, DL, VT); |
| 6478 | } |
| 6479 | case Intrinsic::amdgcn_ldexp: |
| 6480 | return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, |
| 6481 | Op.getOperand(1), Op.getOperand(2)); |
| 6482 | |
| 6483 | case Intrinsic::amdgcn_fract: |
| 6484 | return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); |
| 6485 | |
| 6486 | case Intrinsic::amdgcn_class: |
| 6487 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, |
| 6488 | Op.getOperand(1), Op.getOperand(2)); |
| 6489 | case Intrinsic::amdgcn_div_fmas: |
| 6490 | return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, |
| 6491 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
| 6492 | Op.getOperand(4)); |
| 6493 | |
| 6494 | case Intrinsic::amdgcn_div_fixup: |
| 6495 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, |
| 6496 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
| 6497 | |
| 6498 | case Intrinsic::amdgcn_div_scale: { |
| 6499 | const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3)); |
| 6500 | |
    // Translate to the operands expected by the machine instruction. The
    // first operand must be the same value as either the numerator or the
    // denominator, depending on which one is being scaled (selected by the
    // constant third operand of the intrinsic).
| 6503 | SDValue Numerator = Op.getOperand(1); |
| 6504 | SDValue Denominator = Op.getOperand(2); |
| 6505 | |
| 6506 | // Note this order is opposite of the machine instruction's operations, |
| 6507 | // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The |
| 6508 | // intrinsic has the numerator as the first operand to match a normal |
| 6509 | // division operation. |
| 6510 | |
| 6511 | SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator; |
| 6512 | |
| 6513 | return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, |
| 6514 | Denominator, Numerator); |
| 6515 | } |
| 6516 | case Intrinsic::amdgcn_icmp: { |
| 6517 | // There is a Pat that handles this variant, so return it as-is. |
| 6518 | if (Op.getOperand(1).getValueType() == MVT::i1 && |
| 6519 | Op.getConstantOperandVal(2) == 0 && |
| 6520 | Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE) |
| 6521 | return Op; |
| 6522 | return lowerICMPIntrinsic(*this, Op.getNode(), DAG); |
| 6523 | } |
| 6524 | case Intrinsic::amdgcn_fcmp: { |
| 6525 | return lowerFCMPIntrinsic(*this, Op.getNode(), DAG); |
| 6526 | } |
| 6527 | case Intrinsic::amdgcn_ballot: |
| 6528 | return lowerBALLOTIntrinsic(*this, Op.getNode(), DAG); |
| 6529 | case Intrinsic::amdgcn_fmed3: |
| 6530 | return DAG.getNode(AMDGPUISD::FMED3, DL, VT, |
| 6531 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
| 6532 | case Intrinsic::amdgcn_fdot2: |
| 6533 | return DAG.getNode(AMDGPUISD::FDOT2, DL, VT, |
| 6534 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
| 6535 | Op.getOperand(4)); |
| 6536 | case Intrinsic::amdgcn_fmul_legacy: |
| 6537 | return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, |
| 6538 | Op.getOperand(1), Op.getOperand(2)); |
| 6539 | case Intrinsic::amdgcn_sffbh: |
| 6540 | return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); |
| 6541 | case Intrinsic::amdgcn_sbfe: |
| 6542 | return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, |
| 6543 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
| 6544 | case Intrinsic::amdgcn_ubfe: |
| 6545 | return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, |
| 6546 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
| 6547 | case Intrinsic::amdgcn_cvt_pkrtz: |
| 6548 | case Intrinsic::amdgcn_cvt_pknorm_i16: |
| 6549 | case Intrinsic::amdgcn_cvt_pknorm_u16: |
| 6550 | case Intrinsic::amdgcn_cvt_pk_i16: |
| 6551 | case Intrinsic::amdgcn_cvt_pk_u16: { |
| 6552 | // FIXME: Stop adding cast if v2f16/v2i16 are legal. |
| 6553 | EVT VT = Op.getValueType(); |
| 6554 | unsigned Opcode; |
| 6555 | |
| 6556 | if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz) |
| 6557 | Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32; |
| 6558 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16) |
| 6559 | Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; |
| 6560 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16) |
| 6561 | Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; |
| 6562 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16) |
| 6563 | Opcode = AMDGPUISD::CVT_PK_I16_I32; |
| 6564 | else |
| 6565 | Opcode = AMDGPUISD::CVT_PK_U16_U32; |
| 6566 | |
| 6567 | if (isTypeLegal(VT)) |
| 6568 | return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2)); |
| 6569 | |
| 6570 | SDValue Node = DAG.getNode(Opcode, DL, MVT::i32, |
| 6571 | Op.getOperand(1), Op.getOperand(2)); |
| 6572 | return DAG.getNode(ISD::BITCAST, DL, VT, Node); |
| 6573 | } |
| 6574 | case Intrinsic::amdgcn_fmad_ftz: |
| 6575 | return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1), |
| 6576 | Op.getOperand(2), Op.getOperand(3)); |
| 6577 | |
| 6578 | case Intrinsic::amdgcn_if_break: |
| 6579 | return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT, |
| 6580 | Op->getOperand(1), Op->getOperand(2)), 0); |
| 6581 | |
| 6582 | case Intrinsic::amdgcn_groupstaticsize: { |
| 6583 | Triple::OSType OS = getTargetMachine().getTargetTriple().getOS(); |
| 6584 | if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) |
| 6585 | return Op; |
| 6586 | |
| 6587 | const Module *M = MF.getFunction().getParent(); |
| 6588 | const GlobalValue *GV = |
| 6589 | M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize)); |
| 6590 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0, |
| 6591 | SIInstrInfo::MO_ABS32_LO); |
| 6592 | return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; |
| 6593 | } |
| 6594 | case Intrinsic::amdgcn_is_shared: |
| 6595 | case Intrinsic::amdgcn_is_private: { |
| 6596 | SDLoc SL(Op); |
| 6597 | unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ? |
| 6598 | AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS; |
| 6599 | SDValue Aperture = getSegmentAperture(AS, SL, DAG); |
| 6600 | SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, |
| 6601 | Op.getOperand(1)); |
| 6602 | |
| 6603 | SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec, |
| 6604 | DAG.getConstant(1, SL, MVT::i32)); |
| 6605 | return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ); |
| 6606 | } |
| 6607 | case Intrinsic::amdgcn_alignbit: |
| 6608 | return DAG.getNode(ISD::FSHR, DL, VT, |
| 6609 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
| 6610 | case Intrinsic::amdgcn_reloc_constant: { |
| 6611 | Module *M = const_cast<Module *>(MF.getFunction().getParent()); |
| 6612 | const MDNode *Metadata = cast<MDNodeSDNode>(Op.getOperand(1))->getMD(); |
| 6613 | auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString(); |
| 6614 | auto RelocSymbol = cast<GlobalVariable>( |
| 6615 | M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext()))); |
| 6616 | SDValue GA = DAG.getTargetGlobalAddress(RelocSymbol, DL, MVT::i32, 0, |
| 6617 | SIInstrInfo::MO_ABS32_LO); |
| 6618 | return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; |
| 6619 | } |
| 6620 | default: |
| 6621 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = |
| 6622 | AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) |
| 6623 | return lowerImage(Op, ImageDimIntr, DAG, false); |
| 6624 | |
| 6625 | return Op; |
| 6626 | } |
| 6627 | } |
| 6628 | |
| 6629 | // This function computes an appropriate offset to pass to |
| 6630 | // MachineMemOperand::setOffset() based on the offset inputs to |
// an intrinsic. If any of the offsets are non-constant or
| 6632 | // if VIndex is non-zero then this function returns 0. Otherwise, |
| 6633 | // it returns the sum of VOffset, SOffset, and Offset. |
| 6634 | static unsigned getBufferOffsetForMMO(SDValue VOffset, |
| 6635 | SDValue SOffset, |
| 6636 | SDValue Offset, |
| 6637 | SDValue VIndex = SDValue()) { |
| 6638 | |
| 6639 | if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) || |
| 6640 | !isa<ConstantSDNode>(Offset)) |
| 6641 | return 0; |
| 6642 | |
| 6643 | if (VIndex) { |
    if (!isa<ConstantSDNode>(VIndex) ||
        !cast<ConstantSDNode>(VIndex)->isNullValue())
| 6645 | return 0; |
| 6646 | } |
| 6647 | |
| 6648 | return cast<ConstantSDNode>(VOffset)->getSExtValue() + |
| 6649 | cast<ConstantSDNode>(SOffset)->getSExtValue() + |
| 6650 | cast<ConstantSDNode>(Offset)->getSExtValue(); |
| 6651 | } |
| 6652 | |
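// Lower a raw buffer atomic intrinsic to the corresponding AMDGPUISD buffer
// atomic node. Raw buffer accesses carry no vindex, so a zero vindex operand
// is synthesized and idxen is cleared.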
| 6653 | SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op, |
| 6654 | SelectionDAG &DAG, |
| 6655 | unsigned NewOpcode) const { |
| 6656 | SDLoc DL(Op); |
| 6657 | |
| 6658 | SDValue VData = Op.getOperand(2); |
| 6659 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
| 6660 | SDValue Ops[] = { |
| 6661 | Op.getOperand(0), // Chain |
| 6662 | VData, // vdata |
| 6663 | Op.getOperand(3), // rsrc |
| 6664 | DAG.getConstant(0, DL, MVT::i32), // vindex |
| 6665 | Offsets.first, // voffset |
| 6666 | Op.getOperand(5), // soffset |
| 6667 | Offsets.second, // offset |
| 6668 | Op.getOperand(6), // cachepolicy |
| 6669 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
| 6670 | }; |
| 6671 | |
| 6672 | auto *M = cast<MemSDNode>(Op); |
| 6673 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6])); |
| 6674 | |
| 6675 | EVT MemVT = VData.getValueType(); |
| 6676 | return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT, |
| 6677 | M->getMemOperand()); |
| 6678 | } |
| 6679 | |
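// Lower a struct buffer atomic intrinsic to the corresponding AMDGPUISD
// buffer atomic node. Struct buffer accesses are always indexed, so the
// intrinsic's vindex operand is passed through and idxen is set.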
| 6680 | SDValue |
| 6681 | SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG, |
| 6682 | unsigned NewOpcode) const { |
| 6683 | SDLoc DL(Op); |
| 6684 | |
| 6685 | SDValue VData = Op.getOperand(2); |
| 6686 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
| 6687 | SDValue Ops[] = { |
| 6688 | Op.getOperand(0), // Chain |
| 6689 | VData, // vdata |
| 6690 | Op.getOperand(3), // rsrc |
| 6691 | Op.getOperand(4), // vindex |
| 6692 | Offsets.first, // voffset |
| 6693 | Op.getOperand(6), // soffset |
| 6694 | Offsets.second, // offset |
| 6695 | Op.getOperand(7), // cachepolicy |
| 6696 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
| 6697 | }; |
| 6698 | |
| 6699 | auto *M = cast<MemSDNode>(Op); |
| 6700 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6], |
| 6701 | Ops[3])); |
| 6702 | |
| 6703 | EVT MemVT = VData.getValueType(); |
| 6704 | return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT, |
| 6705 | M->getMemOperand()); |
| 6706 | } |
| 6707 | |
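// Lower ISD::INTRINSIC_W_CHAIN nodes: target intrinsics that produce a value
// and may access memory, so they are threaded on the chain.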
| 6708 | SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, |
| 6709 | SelectionDAG &DAG) const { |
| 6710 | unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
| 6711 | SDLoc DL(Op); |
| 6712 | |
| 6713 | switch (IntrID) { |
| 6714 | case Intrinsic::amdgcn_ds_ordered_add: |
| 6715 | case Intrinsic::amdgcn_ds_ordered_swap: { |
| 6716 | MemSDNode *M = cast<MemSDNode>(Op); |
| 6717 | SDValue Chain = M->getOperand(0); |
| 6718 | SDValue M0 = M->getOperand(2); |
| 6719 | SDValue Value = M->getOperand(3); |
| 6720 | unsigned IndexOperand = M->getConstantOperandVal(7); |
| 6721 | unsigned WaveRelease = M->getConstantOperandVal(8); |
| 6722 | unsigned WaveDone = M->getConstantOperandVal(9); |
| 6723 | |
| 6724 | unsigned OrderedCountIndex = IndexOperand & 0x3f; |
| 6725 | IndexOperand &= ~0x3f; |
| 6726 | unsigned CountDw = 0; |
| 6727 | |
| 6728 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) { |
| 6729 | CountDw = (IndexOperand >> 24) & 0xf; |
| 6730 | IndexOperand &= ~(0xf << 24); |
| 6731 | |
| 6732 | if (CountDw < 1 || CountDw > 4) { |
        report_fatal_error(
            "ds_ordered_count: dword count must be between 1 and 4");
| 6735 | } |
| 6736 | } |
| 6737 | |
| 6738 | if (IndexOperand) |
      report_fatal_error("ds_ordered_count: bad index operand");
| 6740 | |
| 6741 | if (WaveDone && !WaveRelease) |
      report_fatal_error("ds_ordered_count: wave_done requires wave_release");
| 6743 | |
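    // Pack the fields into the 16-bit offset encoding used by
    // ds_ordered_count: the low byte holds the dword offset of the ordered
    // count slot (index * 4), and the high byte holds wave_release, wave_done,
    // the shader type, the instruction kind (add or swap) and, on GFX10+, the
    // dword count.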
| 6744 | unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1; |
| 6745 | unsigned ShaderType = |
| 6746 | SIInstrInfo::getDSShaderTypeValue(DAG.getMachineFunction()); |
| 6747 | unsigned Offset0 = OrderedCountIndex << 2; |
| 6748 | unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | |
| 6749 | (Instruction << 4); |
| 6750 | |
| 6751 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) |
| 6752 | Offset1 |= (CountDw - 1) << 6; |
| 6753 | |
| 6754 | unsigned Offset = Offset0 | (Offset1 << 8); |
| 6755 | |
| 6756 | SDValue Ops[] = { |
| 6757 | Chain, |
| 6758 | Value, |
| 6759 | DAG.getTargetConstant(Offset, DL, MVT::i16), |
| 6760 | copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue |
| 6761 | }; |
| 6762 | return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL, |
| 6763 | M->getVTList(), Ops, M->getMemoryVT(), |
| 6764 | M->getMemOperand()); |
| 6765 | } |
  case Intrinsic::amdgcn_ds_fadd: {
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, SDLoc(Op), M->getMemoryVT(),
                         M->getOperand(0), M->getOperand(2), M->getOperand(3),
                         M->getMemOperand());
  }
| 6779 | case Intrinsic::amdgcn_atomic_inc: |
| 6780 | case Intrinsic::amdgcn_atomic_dec: |
| 6781 | case Intrinsic::amdgcn_ds_fmin: |
| 6782 | case Intrinsic::amdgcn_ds_fmax: { |
| 6783 | MemSDNode *M = cast<MemSDNode>(Op); |
| 6784 | unsigned Opc; |
| 6785 | switch (IntrID) { |
| 6786 | case Intrinsic::amdgcn_atomic_inc: |
| 6787 | Opc = AMDGPUISD::ATOMIC_INC; |
| 6788 | break; |
| 6789 | case Intrinsic::amdgcn_atomic_dec: |
| 6790 | Opc = AMDGPUISD::ATOMIC_DEC; |
| 6791 | break; |
| 6792 | case Intrinsic::amdgcn_ds_fmin: |
| 6793 | Opc = AMDGPUISD::ATOMIC_LOAD_FMIN; |
| 6794 | break; |
| 6795 | case Intrinsic::amdgcn_ds_fmax: |
| 6796 | Opc = AMDGPUISD::ATOMIC_LOAD_FMAX; |
| 6797 | break; |
| 6798 | default: |
      llvm_unreachable("Unknown intrinsic!");
| 6800 | } |
| 6801 | SDValue Ops[] = { |
| 6802 | M->getOperand(0), // Chain |
| 6803 | M->getOperand(2), // Ptr |
| 6804 | M->getOperand(3) // Value |
| 6805 | }; |
| 6806 | |
| 6807 | return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, |
| 6808 | M->getMemoryVT(), M->getMemOperand()); |
| 6809 | } |
| 6810 | case Intrinsic::amdgcn_buffer_load: |
| 6811 | case Intrinsic::amdgcn_buffer_load_format: { |
| 6812 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); |
| 6813 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); |
| 6814 | unsigned IdxEn = 1; |
| 6815 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3))) |
| 6816 | IdxEn = Idx->getZExtValue() != 0; |
| 6817 | SDValue Ops[] = { |
| 6818 | Op.getOperand(0), // Chain |
| 6819 | Op.getOperand(2), // rsrc |
| 6820 | Op.getOperand(3), // vindex |
| 6821 | SDValue(), // voffset -- will be set by setBufferOffsets |
| 6822 | SDValue(), // soffset -- will be set by setBufferOffsets |
| 6823 | SDValue(), // offset -- will be set by setBufferOffsets |
| 6824 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy |
| 6825 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen |
| 6826 | }; |
| 6827 | |
| 6828 | unsigned Offset = setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]); |
| 6829 | // We don't know the offset if vindex is non-zero, so clear it. |
| 6830 | if (IdxEn) |
| 6831 | Offset = 0; |
| 6832 | |
| 6833 | unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? |
| 6834 | AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; |
| 6835 | |
| 6836 | EVT VT = Op.getValueType(); |
| 6837 | EVT IntVT = VT.changeTypeToInteger(); |
| 6838 | auto *M = cast<MemSDNode>(Op); |
| 6839 | M->getMemOperand()->setOffset(Offset); |
| 6840 | EVT LoadVT = Op.getValueType(); |
| 6841 | |
| 6842 | if (LoadVT.getScalarType() == MVT::f16) |
| 6843 | return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, |
| 6844 | M, DAG, Ops); |
| 6845 | |
| 6846 | // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics |
| 6847 | if (LoadVT.getScalarType() == MVT::i8 || |
| 6848 | LoadVT.getScalarType() == MVT::i16) |
| 6849 | return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); |
| 6850 | |
| 6851 | return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, |
| 6852 | M->getMemOperand(), DAG); |
| 6853 | } |
| 6854 | case Intrinsic::amdgcn_raw_buffer_load: |
| 6855 | case Intrinsic::amdgcn_raw_buffer_load_format: { |
| 6856 | const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format; |
| 6857 | |
| 6858 | auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); |
| 6859 | SDValue Ops[] = { |
| 6860 | Op.getOperand(0), // Chain |
| 6861 | Op.getOperand(2), // rsrc |
| 6862 | DAG.getConstant(0, DL, MVT::i32), // vindex |
| 6863 | Offsets.first, // voffset |
| 6864 | Op.getOperand(4), // soffset |
| 6865 | Offsets.second, // offset |
| 6866 | Op.getOperand(5), // cachepolicy, swizzled buffer |
| 6867 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
| 6868 | }; |
| 6869 | |
| 6870 | auto *M = cast<MemSDNode>(Op); |
| 6871 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5])); |
| 6872 | return lowerIntrinsicLoad(M, IsFormat, DAG, Ops); |
| 6873 | } |
| 6874 | case Intrinsic::amdgcn_struct_buffer_load: |
| 6875 | case Intrinsic::amdgcn_struct_buffer_load_format: { |
| 6876 | const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format; |
| 6877 | |
| 6878 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
| 6879 | SDValue Ops[] = { |
| 6880 | Op.getOperand(0), // Chain |
| 6881 | Op.getOperand(2), // rsrc |
| 6882 | Op.getOperand(3), // vindex |
| 6883 | Offsets.first, // voffset |
| 6884 | Op.getOperand(5), // soffset |
| 6885 | Offsets.second, // offset |
| 6886 | Op.getOperand(6), // cachepolicy, swizzled buffer |
| 6887 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
| 6888 | }; |
| 6889 | |
| 6890 | auto *M = cast<MemSDNode>(Op); |
| 6891 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5], |
| 6892 | Ops[2])); |
| 6893 | return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops); |
| 6894 | } |
| 6895 | case Intrinsic::amdgcn_tbuffer_load: { |
| 6896 | MemSDNode *M = cast<MemSDNode>(Op); |
| 6897 | EVT LoadVT = Op.getValueType(); |
| 6898 | |
| 6899 | unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); |
| 6900 | unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); |
| 6901 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); |
| 6902 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); |
| 6903 | unsigned IdxEn = 1; |
| 6904 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3))) |
| 6905 | IdxEn = Idx->getZExtValue() != 0; |
| 6906 | SDValue Ops[] = { |
| 6907 | Op.getOperand(0), // Chain |
| 6908 | Op.getOperand(2), // rsrc |
| 6909 | Op.getOperand(3), // vindex |
| 6910 | Op.getOperand(4), // voffset |
| 6911 | Op.getOperand(5), // soffset |
| 6912 | Op.getOperand(6), // offset |
| 6913 | DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format |
| 6914 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy |
| 6915 | DAG.getTargetConstant(IdxEn, DL, MVT::i1) // idxen |
| 6916 | }; |
| 6917 | |
| 6918 | if (LoadVT.getScalarType() == MVT::f16) |
| 6919 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, |
| 6920 | M, DAG, Ops); |
| 6921 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, |
| 6922 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), |
| 6923 | DAG); |
| 6924 | } |
| 6925 | case Intrinsic::amdgcn_raw_tbuffer_load: { |
| 6926 | MemSDNode *M = cast<MemSDNode>(Op); |
| 6927 | EVT LoadVT = Op.getValueType(); |
| 6928 | auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); |
| 6929 | |
| 6930 | SDValue Ops[] = { |
| 6931 | Op.getOperand(0), // Chain |
| 6932 | Op.getOperand(2), // rsrc |
| 6933 | DAG.getConstant(0, DL, MVT::i32), // vindex |
| 6934 | Offsets.first, // voffset |
| 6935 | Op.getOperand(4), // soffset |
| 6936 | Offsets.second, // offset |
| 6937 | Op.getOperand(5), // format |
| 6938 | Op.getOperand(6), // cachepolicy, swizzled buffer |
| 6939 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
| 6940 | }; |
| 6941 | |
| 6942 | if (LoadVT.getScalarType() == MVT::f16) |
| 6943 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, |
| 6944 | M, DAG, Ops); |
| 6945 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, |
| 6946 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), |
| 6947 | DAG); |
| 6948 | } |
| 6949 | case Intrinsic::amdgcn_struct_tbuffer_load: { |
| 6950 | MemSDNode *M = cast<MemSDNode>(Op); |
| 6951 | EVT LoadVT = Op.getValueType(); |
| 6952 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
| 6953 | |
| 6954 | SDValue Ops[] = { |
| 6955 | Op.getOperand(0), // Chain |
| 6956 | Op.getOperand(2), // rsrc |
| 6957 | Op.getOperand(3), // vindex |
| 6958 | Offsets.first, // voffset |
| 6959 | Op.getOperand(5), // soffset |
| 6960 | Offsets.second, // offset |
| 6961 | Op.getOperand(6), // format |
| 6962 | Op.getOperand(7), // cachepolicy, swizzled buffer |
| 6963 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
| 6964 | }; |
| 6965 | |
| 6966 | if (LoadVT.getScalarType() == MVT::f16) |
| 6967 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, |
| 6968 | M, DAG, Ops); |
| 6969 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, |
| 6970 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), |
| 6971 | DAG); |
| 6972 | } |
| 6973 | case Intrinsic::amdgcn_buffer_atomic_swap: |
| 6974 | case Intrinsic::amdgcn_buffer_atomic_add: |
| 6975 | case Intrinsic::amdgcn_buffer_atomic_sub: |
| 6976 | case Intrinsic::amdgcn_buffer_atomic_csub: |
| 6977 | case Intrinsic::amdgcn_buffer_atomic_smin: |
| 6978 | case Intrinsic::amdgcn_buffer_atomic_umin: |
| 6979 | case Intrinsic::amdgcn_buffer_atomic_smax: |
| 6980 | case Intrinsic::amdgcn_buffer_atomic_umax: |
| 6981 | case Intrinsic::amdgcn_buffer_atomic_and: |
| 6982 | case Intrinsic::amdgcn_buffer_atomic_or: |
| 6983 | case Intrinsic::amdgcn_buffer_atomic_xor: |
| 6984 | case Intrinsic::amdgcn_buffer_atomic_fadd: { |
| 6985 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); |
| 6986 | unsigned IdxEn = 1; |
| 6987 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) |
| 6988 | IdxEn = Idx->getZExtValue() != 0; |
| 6989 | SDValue Ops[] = { |
| 6990 | Op.getOperand(0), // Chain |
| 6991 | Op.getOperand(2), // vdata |
| 6992 | Op.getOperand(3), // rsrc |
| 6993 | Op.getOperand(4), // vindex |
| 6994 | SDValue(), // voffset -- will be set by setBufferOffsets |
| 6995 | SDValue(), // soffset -- will be set by setBufferOffsets |
| 6996 | SDValue(), // offset -- will be set by setBufferOffsets |
| 6997 | DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy |
| 6998 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen |
| 6999 | }; |
| 7000 | unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); |
| 7001 | // We don't know the offset if vindex is non-zero, so clear it. |
| 7002 | if (IdxEn) |
| 7003 | Offset = 0; |
| 7004 | EVT VT = Op.getValueType(); |
| 7005 | |
| 7006 | auto *M = cast<MemSDNode>(Op); |
| 7007 | M->getMemOperand()->setOffset(Offset); |
| 7008 | unsigned Opcode = 0; |
| 7009 | |
| 7010 | switch (IntrID) { |
| 7011 | case Intrinsic::amdgcn_buffer_atomic_swap: |
| 7012 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; |
| 7013 | break; |
| 7014 | case Intrinsic::amdgcn_buffer_atomic_add: |
| 7015 | Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; |
| 7016 | break; |
| 7017 | case Intrinsic::amdgcn_buffer_atomic_sub: |
| 7018 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; |
| 7019 | break; |
| 7020 | case Intrinsic::amdgcn_buffer_atomic_csub: |
| 7021 | Opcode = AMDGPUISD::BUFFER_ATOMIC_CSUB; |
| 7022 | break; |
| 7023 | case Intrinsic::amdgcn_buffer_atomic_smin: |
| 7024 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; |
| 7025 | break; |
| 7026 | case Intrinsic::amdgcn_buffer_atomic_umin: |
| 7027 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; |
| 7028 | break; |
| 7029 | case Intrinsic::amdgcn_buffer_atomic_smax: |
| 7030 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; |
| 7031 | break; |
| 7032 | case Intrinsic::amdgcn_buffer_atomic_umax: |
| 7033 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; |
| 7034 | break; |
| 7035 | case Intrinsic::amdgcn_buffer_atomic_and: |
| 7036 | Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; |
| 7037 | break; |
| 7038 | case Intrinsic::amdgcn_buffer_atomic_or: |
| 7039 | Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; |
| 7040 | break; |
| 7041 | case Intrinsic::amdgcn_buffer_atomic_xor: |
| 7042 | Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; |
| 7043 | break; |
| 7044 | case Intrinsic::amdgcn_buffer_atomic_fadd: |
| 7045 | if (!Op.getValue(0).use_empty()) { |
| 7046 | DiagnosticInfoUnsupported |
        NoFpRet(DAG.getMachineFunction().getFunction(),
                "return versions of fp atomics not supported",
| 7049 | DL.getDebugLoc(), DS_Error); |
| 7050 | DAG.getContext()->diagnose(NoFpRet); |
| 7051 | return SDValue(); |
| 7052 | } |
| 7053 | Opcode = AMDGPUISD::BUFFER_ATOMIC_FADD; |
| 7054 | break; |
| 7055 | default: |
      llvm_unreachable("unhandled atomic opcode");
| 7057 | } |
| 7058 | |
| 7059 | return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, |
| 7060 | M->getMemOperand()); |
| 7061 | } |
| 7062 | case Intrinsic::amdgcn_raw_buffer_atomic_fadd: |
| 7063 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD); |
| 7064 | case Intrinsic::amdgcn_struct_buffer_atomic_fadd: |
| 7065 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD); |
| 7066 | case Intrinsic::amdgcn_raw_buffer_atomic_swap: |
| 7067 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SWAP); |
| 7068 | case Intrinsic::amdgcn_raw_buffer_atomic_add: |
| 7069 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD); |
| 7070 | case Intrinsic::amdgcn_raw_buffer_atomic_sub: |
| 7071 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB); |
| 7072 | case Intrinsic::amdgcn_raw_buffer_atomic_smin: |
| 7073 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMIN); |
| 7074 | case Intrinsic::amdgcn_raw_buffer_atomic_umin: |
| 7075 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMIN); |
| 7076 | case Intrinsic::amdgcn_raw_buffer_atomic_smax: |
| 7077 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMAX); |
| 7078 | case Intrinsic::amdgcn_raw_buffer_atomic_umax: |
| 7079 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMAX); |
| 7080 | case Intrinsic::amdgcn_raw_buffer_atomic_and: |
| 7081 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND); |
| 7082 | case Intrinsic::amdgcn_raw_buffer_atomic_or: |
| 7083 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR); |
| 7084 | case Intrinsic::amdgcn_raw_buffer_atomic_xor: |
| 7085 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR); |
| 7086 | case Intrinsic::amdgcn_raw_buffer_atomic_inc: |
| 7087 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC); |
| 7088 | case Intrinsic::amdgcn_raw_buffer_atomic_dec: |
| 7089 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC); |
| 7090 | case Intrinsic::amdgcn_struct_buffer_atomic_swap: |
| 7091 | return lowerStructBufferAtomicIntrin(Op, DAG, |
| 7092 | AMDGPUISD::BUFFER_ATOMIC_SWAP); |
| 7093 | case Intrinsic::amdgcn_struct_buffer_atomic_add: |
| 7094 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD); |
| 7095 | case Intrinsic::amdgcn_struct_buffer_atomic_sub: |
| 7096 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB); |
| 7097 | case Intrinsic::amdgcn_struct_buffer_atomic_smin: |
| 7098 | return lowerStructBufferAtomicIntrin(Op, DAG, |
| 7099 | AMDGPUISD::BUFFER_ATOMIC_SMIN); |
| 7100 | case Intrinsic::amdgcn_struct_buffer_atomic_umin: |
| 7101 | return lowerStructBufferAtomicIntrin(Op, DAG, |
| 7102 | AMDGPUISD::BUFFER_ATOMIC_UMIN); |
| 7103 | case Intrinsic::amdgcn_struct_buffer_atomic_smax: |
| 7104 | return lowerStructBufferAtomicIntrin(Op, DAG, |
| 7105 | AMDGPUISD::BUFFER_ATOMIC_SMAX); |
| 7106 | case Intrinsic::amdgcn_struct_buffer_atomic_umax: |
| 7107 | return lowerStructBufferAtomicIntrin(Op, DAG, |
| 7108 | AMDGPUISD::BUFFER_ATOMIC_UMAX); |
| 7109 | case Intrinsic::amdgcn_struct_buffer_atomic_and: |
| 7110 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND); |
| 7111 | case Intrinsic::amdgcn_struct_buffer_atomic_or: |
| 7112 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR); |
| 7113 | case Intrinsic::amdgcn_struct_buffer_atomic_xor: |
| 7114 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR); |
| 7115 | case Intrinsic::amdgcn_struct_buffer_atomic_inc: |
| 7116 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC); |
| 7117 | case Intrinsic::amdgcn_struct_buffer_atomic_dec: |
| 7118 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC); |
| 7119 | |
| 7120 | case Intrinsic::amdgcn_buffer_atomic_cmpswap: { |
| 7121 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); |
| 7122 | unsigned IdxEn = 1; |
| 7123 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5))) |
| 7124 | IdxEn = Idx->getZExtValue() != 0; |
| 7125 | SDValue Ops[] = { |
| 7126 | Op.getOperand(0), // Chain |
| 7127 | Op.getOperand(2), // src |
| 7128 | Op.getOperand(3), // cmp |
| 7129 | Op.getOperand(4), // rsrc |
| 7130 | Op.getOperand(5), // vindex |
| 7131 | SDValue(), // voffset -- will be set by setBufferOffsets |
| 7132 | SDValue(), // soffset -- will be set by setBufferOffsets |
| 7133 | SDValue(), // offset -- will be set by setBufferOffsets |
| 7134 | DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy |
| 7135 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen |
| 7136 | }; |
| 7137 | unsigned Offset = setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]); |
| 7138 | // We don't know the offset if vindex is non-zero, so clear it. |
| 7139 | if (IdxEn) |
| 7140 | Offset = 0; |
| 7141 | EVT VT = Op.getValueType(); |
| 7142 | auto *M = cast<MemSDNode>(Op); |
| 7143 | M->getMemOperand()->setOffset(Offset); |
| 7144 | |
| 7145 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, |
| 7146 | Op->getVTList(), Ops, VT, M->getMemOperand()); |
| 7147 | } |
| 7148 | case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: { |
| 7149 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
| 7150 | SDValue Ops[] = { |
| 7151 | Op.getOperand(0), // Chain |
| 7152 | Op.getOperand(2), // src |
| 7153 | Op.getOperand(3), // cmp |
| 7154 | Op.getOperand(4), // rsrc |
| 7155 | DAG.getConstant(0, DL, MVT::i32), // vindex |
| 7156 | Offsets.first, // voffset |
| 7157 | Op.getOperand(6), // soffset |
| 7158 | Offsets.second, // offset |
| 7159 | Op.getOperand(7), // cachepolicy |
| 7160 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
| 7161 | }; |
| 7162 | EVT VT = Op.getValueType(); |
| 7163 | auto *M = cast<MemSDNode>(Op); |
| 7164 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7])); |
| 7165 | |
| 7166 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, |
| 7167 | Op->getVTList(), Ops, VT, M->getMemOperand()); |
| 7168 | } |
| 7169 | case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: { |
| 7170 | auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG); |
| 7171 | SDValue Ops[] = { |
| 7172 | Op.getOperand(0), // Chain |
| 7173 | Op.getOperand(2), // src |
| 7174 | Op.getOperand(3), // cmp |
| 7175 | Op.getOperand(4), // rsrc |
| 7176 | Op.getOperand(5), // vindex |
| 7177 | Offsets.first, // voffset |
| 7178 | Op.getOperand(7), // soffset |
| 7179 | Offsets.second, // offset |
| 7180 | Op.getOperand(8), // cachepolicy |
| 7181 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
| 7182 | }; |
| 7183 | EVT VT = Op.getValueType(); |
| 7184 | auto *M = cast<MemSDNode>(Op); |
| 7185 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7], |
| 7186 | Ops[4])); |
| 7187 | |
| 7188 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, |
| 7189 | Op->getVTList(), Ops, VT, M->getMemOperand()); |
| 7190 | } |
| 7191 | case Intrinsic::amdgcn_global_atomic_fadd: { |
| 7192 | if (!Op.getValue(0).use_empty()) { |
| 7193 | DiagnosticInfoUnsupported |
        NoFpRet(DAG.getMachineFunction().getFunction(),
                "return versions of fp atomics not supported",
| 7196 | DL.getDebugLoc(), DS_Error); |
| 7197 | DAG.getContext()->diagnose(NoFpRet); |
| 7198 | return SDValue(); |
| 7199 | } |
| 7200 | MemSDNode *M = cast<MemSDNode>(Op); |
| 7201 | SDValue Ops[] = { |
| 7202 | M->getOperand(0), // Chain |
| 7203 | M->getOperand(2), // Ptr |
| 7204 | M->getOperand(3) // Value |
| 7205 | }; |
| 7206 | |
| 7207 | EVT VT = Op.getOperand(3).getValueType(); |
| 7208 | return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT, |
| 7209 | DAG.getVTList(VT, MVT::Other), Ops, |
| 7210 | M->getMemOperand()); |
| 7211 | } |
| 7212 | case Intrinsic::amdgcn_image_bvh_intersect_ray: { |
| 7213 | SDLoc DL(Op); |
| 7214 | MemSDNode *M = cast<MemSDNode>(Op); |
| 7215 | SDValue NodePtr = M->getOperand(2); |
| 7216 | SDValue RayExtent = M->getOperand(3); |
| 7217 | SDValue RayOrigin = M->getOperand(4); |
| 7218 | SDValue RayDir = M->getOperand(5); |
| 7219 | SDValue RayInvDir = M->getOperand(6); |
| 7220 | SDValue TDescr = M->getOperand(7); |
| 7221 | |
| 7222 | assert(NodePtr.getValueType() == MVT::i32 || |
| 7223 | NodePtr.getValueType() == MVT::i64); |
| 7224 | assert(RayDir.getValueType() == MVT::v4f16 || |
| 7225 | RayDir.getValueType() == MVT::v4f32); |
| 7226 | |
| 7227 | bool IsA16 = RayDir.getValueType().getVectorElementType() == MVT::f16; |
| 7228 | bool Is64 = NodePtr.getValueType() == MVT::i64; |
| 7229 | unsigned Opcode = IsA16 ? Is64 ? AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16_nsa |
| 7230 | : AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16_nsa |
| 7231 | : Is64 ? AMDGPU::IMAGE_BVH64_INTERSECT_RAY_nsa |
| 7232 | : AMDGPU::IMAGE_BVH_INTERSECT_RAY_nsa; |
| 7233 | |
| 7234 | SmallVector<SDValue, 16> Ops; |
| 7235 | |
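    // Pack the first three lanes of a vector operand into dword operands:
    // 32-bit lanes are bitcast to i32 and passed through, while f16 lanes are
    // paired into v2f16 dwords. In the unaligned case the leftover lane from
    // the previous operand is popped and paired with this operand's first
    // lane.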
| 7236 | auto packLanes = [&DAG, &Ops, &DL] (SDValue Op, bool IsAligned) { |
| 7237 | SmallVector<SDValue, 3> Lanes; |
| 7238 | DAG.ExtractVectorElements(Op, Lanes, 0, 3); |
| 7239 | if (Lanes[0].getValueSizeInBits() == 32) { |
| 7240 | for (unsigned I = 0; I < 3; ++I) |
| 7241 | Ops.push_back(DAG.getBitcast(MVT::i32, Lanes[I])); |
| 7242 | } else { |
| 7243 | if (IsAligned) { |
| 7244 | Ops.push_back( |
| 7245 | DAG.getBitcast(MVT::i32, |
| 7246 | DAG.getBuildVector(MVT::v2f16, DL, |
| 7247 | { Lanes[0], Lanes[1] }))); |
| 7248 | Ops.push_back(Lanes[2]); |
| 7249 | } else { |
| 7250 | SDValue Elt0 = Ops.pop_back_val(); |
| 7251 | Ops.push_back( |
| 7252 | DAG.getBitcast(MVT::i32, |
| 7253 | DAG.getBuildVector(MVT::v2f16, DL, |
| 7254 | { Elt0, Lanes[0] }))); |
| 7255 | Ops.push_back( |
| 7256 | DAG.getBitcast(MVT::i32, |
| 7257 | DAG.getBuildVector(MVT::v2f16, DL, |
| 7258 | { Lanes[1], Lanes[2] }))); |
| 7259 | } |
| 7260 | } |
| 7261 | }; |
| 7262 | |
| 7263 | if (Is64) |
| 7264 | DAG.ExtractVectorElements(DAG.getBitcast(MVT::v2i32, NodePtr), Ops, 0, 2); |
| 7265 | else |
| 7266 | Ops.push_back(NodePtr); |
| 7267 | |
| 7268 | Ops.push_back(DAG.getBitcast(MVT::i32, RayExtent)); |
| 7269 | packLanes(RayOrigin, true); |
| 7270 | packLanes(RayDir, true); |
| 7271 | packLanes(RayInvDir, false); |
| 7272 | Ops.push_back(TDescr); |
| 7273 | if (IsA16) |
| 7274 | Ops.push_back(DAG.getTargetConstant(1, DL, MVT::i1)); |
| 7275 | Ops.push_back(M->getChain()); |
| 7276 | |
| 7277 | auto *NewNode = DAG.getMachineNode(Opcode, DL, M->getVTList(), Ops); |
| 7278 | MachineMemOperand *MemRef = M->getMemOperand(); |
| 7279 | DAG.setNodeMemRefs(NewNode, {MemRef}); |
| 7280 | return SDValue(NewNode, 0); |
| 7281 | } |
| 7282 | default: |
| 7283 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = |
| 7284 | AMDGPU::getImageDimIntrinsicInfo(IntrID)) |
| 7285 | return lowerImage(Op, ImageDimIntr, DAG, true); |
| 7286 | |
| 7287 | return SDValue(); |
| 7288 | } |
| 7289 | } |
| 7290 | |
// Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
// dwordx4 if the subtarget lacks dwordx3 load/store instructions.
| 7293 | SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, |
| 7294 | SDVTList VTList, |
| 7295 | ArrayRef<SDValue> Ops, EVT MemVT, |
| 7296 | MachineMemOperand *MMO, |
| 7297 | SelectionDAG &DAG) const { |
| 7298 | EVT VT = VTList.VTs[0]; |
| 7299 | EVT WidenedVT = VT; |
| 7300 | EVT WidenedMemVT = MemVT; |
| 7301 | if (!Subtarget->hasDwordx3LoadStores() && |
| 7302 | (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) { |
| 7303 | WidenedVT = EVT::getVectorVT(*DAG.getContext(), |
| 7304 | WidenedVT.getVectorElementType(), 4); |
| 7305 | WidenedMemVT = EVT::getVectorVT(*DAG.getContext(), |
| 7306 | WidenedMemVT.getVectorElementType(), 4); |
| 7307 | MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16); |
| 7308 | } |
| 7309 | |
| 7310 | assert(VTList.NumVTs == 2); |
| 7311 | SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]); |
| 7312 | |
| 7313 | auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops, |
| 7314 | WidenedMemVT, MMO); |
| 7315 | if (WidenedVT != VT) { |
    auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
                               DAG.getVectorIdxConstant(0, DL));
| 7318 | NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL); |
| 7319 | } |
| 7320 | return NewOp; |
| 7321 | } |
| 7322 | |
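// Prepare a D16 (16-bit channel) store payload for instruction selection:
// unpack the elements on subtargets with unpacked D16 memory instructions,
// repack pairs of elements into i32s to work around the gfx8.1 d16 image
// store bug, and widen 3-element payloads to 4 elements.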
| 7323 | SDValue SITargetLowering::handleD16VData(SDValue VData, SelectionDAG &DAG, |
| 7324 | bool ImageStore) const { |
| 7325 | EVT StoreVT = VData.getValueType(); |
| 7326 | |
| 7327 | // No change for f16 and legal vector D16 types. |
| 7328 | if (!StoreVT.isVector()) |
| 7329 | return VData; |
| 7330 | |
| 7331 | SDLoc DL(VData); |
| 7332 | unsigned NumElements = StoreVT.getVectorNumElements(); |
| 7333 | |
| 7334 | if (Subtarget->hasUnpackedD16VMem()) { |
| 7335 | // We need to unpack the packed data to store. |
| 7336 | EVT IntStoreVT = StoreVT.changeTypeToInteger(); |
| 7337 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); |
| 7338 | |
| 7339 | EVT EquivStoreVT = |
| 7340 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElements); |
| 7341 | SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); |
| 7342 | return DAG.UnrollVectorOp(ZExt.getNode()); |
| 7343 | } |
| 7344 | |
| 7345 | // The sq block of gfx8.1 does not estimate register use correctly for d16 |
| 7346 | // image store instructions. The data operand is computed as if it were not a |
| 7347 | // d16 image instruction. |
| 7348 | if (ImageStore && Subtarget->hasImageStoreD16Bug()) { |
| 7349 | // Bitcast to i16 |
| 7350 | EVT IntStoreVT = StoreVT.changeTypeToInteger(); |
| 7351 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); |
| 7352 | |
| 7353 | // Decompose into scalars |
| 7354 | SmallVector<SDValue, 4> Elts; |
| 7355 | DAG.ExtractVectorElements(IntVData, Elts); |
| 7356 | |
| 7357 | // Group pairs of i16 into v2i16 and bitcast to i32 |
| 7358 | SmallVector<SDValue, 4> PackedElts; |
| 7359 | for (unsigned I = 0; I < Elts.size() / 2; I += 1) { |
| 7360 | SDValue Pair = |
| 7361 | DAG.getBuildVector(MVT::v2i16, DL, {Elts[I * 2], Elts[I * 2 + 1]}); |
| 7362 | SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); |
| 7363 | PackedElts.push_back(IntPair); |
| 7364 | } |
| 7365 | if ((NumElements % 2) == 1) { |
| 7366 | // Handle v3i16 |
| 7367 | unsigned I = Elts.size() / 2; |
| 7368 | SDValue Pair = DAG.getBuildVector(MVT::v2i16, DL, |
| 7369 | {Elts[I * 2], DAG.getUNDEF(MVT::i16)}); |
| 7370 | SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); |
| 7371 | PackedElts.push_back(IntPair); |
| 7372 | } |
| 7373 | |
| 7374 | // Pad using UNDEF |
| 7375 | PackedElts.resize(Elts.size(), DAG.getUNDEF(MVT::i32)); |
| 7376 | |
| 7377 | // Build final vector |
| 7378 | EVT VecVT = |
| 7379 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, PackedElts.size()); |
| 7380 | return DAG.getBuildVector(VecVT, DL, PackedElts); |
| 7381 | } |
| 7382 | |
| 7383 | if (NumElements == 3) { |
| 7384 | EVT IntStoreVT = |
| 7385 | EVT::getIntegerVT(*DAG.getContext(), StoreVT.getStoreSizeInBits()); |
| 7386 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); |
| 7387 | |
| 7388 | EVT WidenedStoreVT = EVT::getVectorVT( |
| 7389 | *DAG.getContext(), StoreVT.getVectorElementType(), NumElements + 1); |
| 7390 | EVT WidenedIntVT = EVT::getIntegerVT(*DAG.getContext(), |
| 7391 | WidenedStoreVT.getStoreSizeInBits()); |
| 7392 | SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenedIntVT, IntVData); |
| 7393 | return DAG.getNode(ISD::BITCAST, DL, WidenedStoreVT, ZExt); |
| 7394 | } |
| 7395 | |
| 7396 | assert(isTypeLegal(StoreVT)); |
| 7397 | return VData; |
| 7398 | } |
| 7399 | |
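// Lower ISD::INTRINSIC_VOID nodes: target intrinsics that have side effects
// but produce no value, so only the chain is returned.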
| 7400 | SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, |
| 7401 | SelectionDAG &DAG) const { |
| 7402 | SDLoc DL(Op); |
| 7403 | SDValue Chain = Op.getOperand(0); |
| 7404 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
| 7405 | MachineFunction &MF = DAG.getMachineFunction(); |
| 7406 | |
| 7407 | switch (IntrinsicID) { |
| 7408 | case Intrinsic::amdgcn_exp_compr: { |
| 7409 | SDValue Src0 = Op.getOperand(4); |
| 7410 | SDValue Src1 = Op.getOperand(5); |
| 7411 | // Hack around illegal type on SI by directly selecting it. |
| 7412 | if (isTypeLegal(Src0.getValueType())) |
| 7413 | return SDValue(); |
| 7414 | |
| 7415 | const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); |
| 7416 | SDValue Undef = DAG.getUNDEF(MVT::f32); |
| 7417 | const SDValue Ops[] = { |
| 7418 | Op.getOperand(2), // tgt |
| 7419 | DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0 |
| 7420 | DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1 |
| 7421 | Undef, // src2 |
| 7422 | Undef, // src3 |
| 7423 | Op.getOperand(7), // vm |
| 7424 | DAG.getTargetConstant(1, DL, MVT::i1), // compr |
| 7425 | Op.getOperand(3), // en |
| 7426 | Op.getOperand(0) // Chain |
| 7427 | }; |
| 7428 | |
| 7429 | unsigned Opc = Done->isNullValue() ? AMDGPU::EXP : AMDGPU::EXP_DONE; |
| 7430 | return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0); |
| 7431 | } |
| 7432 | case Intrinsic::amdgcn_s_barrier: { |
| 7433 | if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { |
| 7434 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
| 7435 | unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; |
| 7436 | if (WGSize <= ST.getWavefrontSize()) |
| 7437 | return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, |
| 7438 | Op.getOperand(0)), 0); |
| 7439 | } |
| 7440 | return SDValue(); |
| 7441 | }; |
| 7442 | case Intrinsic::amdgcn_tbuffer_store: { |
| 7443 | SDValue VData = Op.getOperand(2); |
| 7444 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); |
| 7445 | if (IsD16) |
| 7446 | VData = handleD16VData(VData, DAG); |
| 7447 | unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); |
| 7448 | unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); |
| 7449 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); |
| 7450 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue(); |
| 7451 | unsigned IdxEn = 1; |
| 7452 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) |
| 7453 | IdxEn = Idx->getZExtValue() != 0; |
| 7454 | SDValue Ops[] = { |
| 7455 | Chain, |
| 7456 | VData, // vdata |
| 7457 | Op.getOperand(3), // rsrc |
| 7458 | Op.getOperand(4), // vindex |
| 7459 | Op.getOperand(5), // voffset |
| 7460 | Op.getOperand(6), // soffset |
| 7461 | Op.getOperand(7), // offset |
| 7462 | DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format |
| 7463 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy |
      DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
| 7465 | }; |
| 7466 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : |
| 7467 | AMDGPUISD::TBUFFER_STORE_FORMAT; |
| 7468 | MemSDNode *M = cast<MemSDNode>(Op); |
| 7469 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
| 7470 | M->getMemoryVT(), M->getMemOperand()); |
| 7471 | } |
| 7472 | |
| 7473 | case Intrinsic::amdgcn_struct_tbuffer_store: { |
| 7474 | SDValue VData = Op.getOperand(2); |
| 7475 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); |
| 7476 | if (IsD16) |
| 7477 | VData = handleD16VData(VData, DAG); |
| 7478 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
| 7479 | SDValue Ops[] = { |
| 7480 | Chain, |
| 7481 | VData, // vdata |
| 7482 | Op.getOperand(3), // rsrc |
| 7483 | Op.getOperand(4), // vindex |
| 7484 | Offsets.first, // voffset |
| 7485 | Op.getOperand(6), // soffset |
| 7486 | Offsets.second, // offset |
| 7487 | Op.getOperand(7), // format |
| 7488 | Op.getOperand(8), // cachepolicy, swizzled buffer |
      DAG.getTargetConstant(1, DL, MVT::i1), // idxen
| 7490 | }; |
| 7491 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : |
| 7492 | AMDGPUISD::TBUFFER_STORE_FORMAT; |
| 7493 | MemSDNode *M = cast<MemSDNode>(Op); |
| 7494 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
| 7495 | M->getMemoryVT(), M->getMemOperand()); |
| 7496 | } |
| 7497 | |
| 7498 | case Intrinsic::amdgcn_raw_tbuffer_store: { |
| 7499 | SDValue VData = Op.getOperand(2); |
| 7500 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); |
| 7501 | if (IsD16) |
| 7502 | VData = handleD16VData(VData, DAG); |
| 7503 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
| 7504 | SDValue Ops[] = { |
| 7505 | Chain, |
| 7506 | VData, // vdata |
| 7507 | Op.getOperand(3), // rsrc |
| 7508 | DAG.getConstant(0, DL, MVT::i32), // vindex |
| 7509 | Offsets.first, // voffset |
| 7510 | Op.getOperand(5), // soffset |
| 7511 | Offsets.second, // offset |
| 7512 | Op.getOperand(6), // format |
| 7513 | Op.getOperand(7), // cachepolicy, swizzled buffer |
      DAG.getTargetConstant(0, DL, MVT::i1), // idxen
| 7515 | }; |
| 7516 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : |
| 7517 | AMDGPUISD::TBUFFER_STORE_FORMAT; |
| 7518 | MemSDNode *M = cast<MemSDNode>(Op); |
| 7519 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
| 7520 | M->getMemoryVT(), M->getMemOperand()); |
| 7521 | } |
| 7522 | |
| 7523 | case Intrinsic::amdgcn_buffer_store: |
| 7524 | case Intrinsic::amdgcn_buffer_store_format: { |
| 7525 | SDValue VData = Op.getOperand(2); |
| 7526 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); |
| 7527 | if (IsD16) |
| 7528 | VData = handleD16VData(VData, DAG); |
| 7529 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); |
| 7530 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); |
| 7531 | unsigned IdxEn = 1; |
| 7532 | if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4))) |
| 7533 | IdxEn = Idx->getZExtValue() != 0; |
| 7534 | SDValue Ops[] = { |
| 7535 | Chain, |
| 7536 | VData, |
| 7537 | Op.getOperand(3), // rsrc |
| 7538 | Op.getOperand(4), // vindex |
| 7539 | SDValue(), // voffset -- will be set by setBufferOffsets |
| 7540 | SDValue(), // soffset -- will be set by setBufferOffsets |
| 7541 | SDValue(), // offset -- will be set by setBufferOffsets |
| 7542 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy |
| 7543 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen |
| 7544 | }; |
| 7545 | unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); |
| 7546 | // We don't know the offset if vindex is non-zero, so clear it. |
| 7547 | if (IdxEn) |
| 7548 | Offset = 0; |
| 7549 | unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ? |
| 7550 | AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; |
| 7551 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; |
| 7552 | MemSDNode *M = cast<MemSDNode>(Op); |
| 7553 | M->getMemOperand()->setOffset(Offset); |
| 7554 | |
| 7555 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics |
| 7556 | EVT VDataType = VData.getValueType().getScalarType(); |
| 7557 | if (VDataType == MVT::i8 || VDataType == MVT::i16) |
| 7558 | return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); |
| 7559 | |
| 7560 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
| 7561 | M->getMemoryVT(), M->getMemOperand()); |
| 7562 | } |
| 7563 | |
| 7564 | case Intrinsic::amdgcn_raw_buffer_store: |
| 7565 | case Intrinsic::amdgcn_raw_buffer_store_format: { |
| 7566 | const bool IsFormat = |
| 7567 | IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format; |
| 7568 | |
| 7569 | SDValue VData = Op.getOperand(2); |
| 7570 | EVT VDataVT = VData.getValueType(); |
| 7571 | EVT EltType = VDataVT.getScalarType(); |
| 7572 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); |
| 7573 | if (IsD16) { |
| 7574 | VData = handleD16VData(VData, DAG); |
| 7575 | VDataVT = VData.getValueType(); |
| 7576 | } |
| 7577 | |
| 7578 | if (!isTypeLegal(VDataVT)) { |
| 7579 | VData = |
| 7580 | DAG.getNode(ISD::BITCAST, DL, |
| 7581 | getEquivalentMemType(*DAG.getContext(), VDataVT), VData); |
| 7582 | } |
| 7583 | |
| 7584 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
| 7585 | SDValue Ops[] = { |
| 7586 | Chain, |
| 7587 | VData, |
| 7588 | Op.getOperand(3), // rsrc |
| 7589 | DAG.getConstant(0, DL, MVT::i32), // vindex |
| 7590 | Offsets.first, // voffset |
| 7591 | Op.getOperand(5), // soffset |
| 7592 | Offsets.second, // offset |
| 7593 | Op.getOperand(6), // cachepolicy, swizzled buffer |
| 7594 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen |
| 7595 | }; |
| 7596 | unsigned Opc = |
| 7597 | IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE; |
| 7598 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; |
| 7599 | MemSDNode *M = cast<MemSDNode>(Op); |
| 7600 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6])); |
| 7601 | |
| 7602 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics |
| 7603 | if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) |
| 7604 | return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M); |
| 7605 | |
| 7606 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
| 7607 | M->getMemoryVT(), M->getMemOperand()); |
| 7608 | } |
| 7609 | |
| 7610 | case Intrinsic::amdgcn_struct_buffer_store: |
| 7611 | case Intrinsic::amdgcn_struct_buffer_store_format: { |
| 7612 | const bool IsFormat = |
| 7613 | IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format; |
| 7614 | |
| 7615 | SDValue VData = Op.getOperand(2); |
| 7616 | EVT VDataVT = VData.getValueType(); |
| 7617 | EVT EltType = VDataVT.getScalarType(); |
| 7618 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); |
| 7619 | |
| 7620 | if (IsD16) { |
| 7621 | VData = handleD16VData(VData, DAG); |
| 7622 | VDataVT = VData.getValueType(); |
| 7623 | } |
| 7624 | |
| 7625 | if (!isTypeLegal(VDataVT)) { |
| 7626 | VData = |
| 7627 | DAG.getNode(ISD::BITCAST, DL, |
| 7628 | getEquivalentMemType(*DAG.getContext(), VDataVT), VData); |
| 7629 | } |
| 7630 | |
| 7631 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
| 7632 | SDValue Ops[] = { |
| 7633 | Chain, |
| 7634 | VData, |
| 7635 | Op.getOperand(3), // rsrc |
| 7636 | Op.getOperand(4), // vindex |
| 7637 | Offsets.first, // voffset |
| 7638 | Op.getOperand(6), // soffset |
| 7639 | Offsets.second, // offset |
| 7640 | Op.getOperand(7), // cachepolicy, swizzled buffer |
| 7641 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen |
| 7642 | }; |
| 7643 | unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ? |
| 7644 | AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; |
| 7645 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; |
| 7646 | MemSDNode *M = cast<MemSDNode>(Op); |
| 7647 | M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6], |
| 7648 | Ops[3])); |
| 7649 | |
| 7650 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics |
| 7651 | EVT VDataType = VData.getValueType().getScalarType(); |
| 7652 | if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) |
| 7653 | return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); |
| 7654 | |
| 7655 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, |
| 7656 | M->getMemoryVT(), M->getMemOperand()); |
| 7657 | } |
| 7658 | case Intrinsic::amdgcn_end_cf: |
| 7659 | return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other, |
| 7660 | Op->getOperand(2), Chain), 0); |
| 7661 | |
| 7662 | default: { |
| 7663 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = |
| 7664 | AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) |
| 7665 | return lowerImage(Op, ImageDimIntr, DAG, true); |
| 7666 | |
| 7667 | return Op; |
| 7668 | } |
| 7669 | } |
| 7670 | } |
| 7671 | |
| 7672 | // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args: |
| 7673 | // offset (the offset that is included in bounds checking and swizzling, to be |
| 7674 | // split between the instruction's voffset and immoffset fields) and soffset |
| 7675 | // (the offset that is excluded from bounds checking and swizzling, to go in |
| 7676 | // the instruction's soffset field). This function takes the first kind of |
| 7677 | // offset and figures out how to split it between voffset and immoffset. |
| 7678 | std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets( |
| 7679 | SDValue Offset, SelectionDAG &DAG) const { |
| 7680 | SDLoc DL(Offset); |
| 7681 | const unsigned MaxImm = 4095; |
| 7682 | SDValue N0 = Offset; |
| 7683 | ConstantSDNode *C1 = nullptr; |
| 7684 | |
| 7685 | if ((C1 = dyn_cast<ConstantSDNode>(N0))) |
| 7686 | N0 = SDValue(); |
| 7687 | else if (DAG.isBaseWithConstantOffset(N0)) { |
| 7688 | C1 = cast<ConstantSDNode>(N0.getOperand(1)); |
| 7689 | N0 = N0.getOperand(0); |
| 7690 | } |
| 7691 | |
| 7692 | if (C1) { |
| 7693 | unsigned ImmOffset = C1->getZExtValue(); |
    // If the immediate value is too big for the immoffset field, keep only the
    // low 12 bits in the immoffset field, so that the value copied/added into
    // the voffset field is a multiple of 4096. That value then stands a better
    // chance of being CSEd with the copy/add emitted for another similar
    // load/store.
    // However, do not round down to a multiple of 4096 if that multiple is
    // negative, as it appears to be illegal to have a negative offset in the
    // vgpr, even if adding the immediate offset makes it positive.
| 7701 | unsigned Overflow = ImmOffset & ~MaxImm; |
| 7702 | ImmOffset -= Overflow; |
| 7703 | if ((int32_t)Overflow < 0) { |
| 7704 | Overflow += ImmOffset; |
| 7705 | ImmOffset = 0; |
| 7706 | } |
| 7707 | C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32)); |
| 7708 | if (Overflow) { |
| 7709 | auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32); |
| 7710 | if (!N0) |
| 7711 | N0 = OverflowVal; |
| 7712 | else { |
| 7713 | SDValue Ops[] = { N0, OverflowVal }; |
| 7714 | N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops); |
| 7715 | } |
| 7716 | } |
| 7717 | } |
| 7718 | if (!N0) |
| 7719 | N0 = DAG.getConstant(0, DL, MVT::i32); |
| 7720 | if (!C1) |
| 7721 | C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32)); |
| 7722 | return {N0, SDValue(C1, 0)}; |
| 7723 | } |
| 7724 | |
// Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
// three offsets (voffset, soffset and instoffset) into the SDValue[3] array
// pointed to by Offsets. Returns the total constant offset if all components
// are known constants, and 0 otherwise.
| 7728 | unsigned SITargetLowering::setBufferOffsets(SDValue CombinedOffset, |
| 7729 | SelectionDAG &DAG, SDValue *Offsets, |
| 7730 | Align Alignment) const { |
| 7731 | SDLoc DL(CombinedOffset); |
| 7732 | if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) { |
| 7733 | uint32_t Imm = C->getZExtValue(); |
| 7734 | uint32_t SOffset, ImmOffset; |
| 7735 | if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, |
| 7736 | Alignment)) { |
| 7737 | Offsets[0] = DAG.getConstant(0, DL, MVT::i32); |
| 7738 | Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); |
| 7739 | Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); |
| 7740 | return SOffset + ImmOffset; |
| 7741 | } |
| 7742 | } |
| 7743 | if (DAG.isBaseWithConstantOffset(CombinedOffset)) { |
| 7744 | SDValue N0 = CombinedOffset.getOperand(0); |
| 7745 | SDValue N1 = CombinedOffset.getOperand(1); |
| 7746 | uint32_t SOffset, ImmOffset; |
| 7747 | int Offset = cast<ConstantSDNode>(N1)->getSExtValue(); |
| 7748 | if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset, |
| 7749 | Subtarget, Alignment)) { |
| 7750 | Offsets[0] = N0; |
| 7751 | Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); |
| 7752 | Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); |
| 7753 | return 0; |
| 7754 | } |
| 7755 | } |
| 7756 | Offsets[0] = CombinedOffset; |
| 7757 | Offsets[1] = DAG.getConstant(0, DL, MVT::i32); |
| 7758 | Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32); |
| 7759 | return 0; |
| 7760 | } |
| 7761 | |
| 7762 | // Handle 8 bit and 16 bit buffer loads |
| 7763 | SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG, |
| 7764 | EVT LoadVT, SDLoc DL, |
| 7765 | ArrayRef<SDValue> Ops, |
| 7766 | MemSDNode *M) const { |
| 7767 | EVT IntVT = LoadVT.changeTypeToInteger(); |
| 7768 | unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ? |
| 7769 | AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT; |
| 7770 | |
| 7771 | SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other); |
| 7772 | SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList, |
| 7773 | Ops, IntVT, |
| 7774 | M->getMemOperand()); |
| 7775 | SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad); |
| 7776 | LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal); |
| 7777 | |
| 7778 | return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL); |
| 7779 | } |
| 7780 | |
| 7781 | // Handle 8 bit and 16 bit buffer stores |
| 7782 | SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG, |
| 7783 | EVT VDataType, SDLoc DL, |
| 7784 | SDValue Ops[], |
| 7785 | MemSDNode *M) const { |
| 7786 | if (VDataType == MVT::f16) |
| 7787 | Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]); |
| 7788 | |
| 7789 | SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]); |
| 7790 | Ops[1] = BufferStoreExt; |
| 7791 | unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE : |
| 7792 | AMDGPUISD::BUFFER_STORE_SHORT; |
| 7793 | ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9); |
| 7794 | return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType, |
| 7795 | M->getMemOperand()); |
| 7796 | } |
| 7797 | |
| 7798 | static SDValue getLoadExtOrTrunc(SelectionDAG &DAG, |
| 7799 | ISD::LoadExtType ExtType, SDValue Op, |
| 7800 | const SDLoc &SL, EVT VT) { |
| 7801 | if (VT.bitsLT(Op.getValueType())) |
| 7802 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Op); |
| 7803 | |
| 7804 | switch (ExtType) { |
| 7805 | case ISD::SEXTLOAD: |
| 7806 | return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op); |
| 7807 | case ISD::ZEXTLOAD: |
| 7808 | return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op); |
| 7809 | case ISD::EXTLOAD: |
| 7810 | return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op); |
| 7811 | case ISD::NON_EXTLOAD: |
| 7812 | return Op; |
| 7813 | } |
| 7814 | |
| 7815 | llvm_unreachable("invalid ext type" ); |
| 7816 | } |
| 7817 | |
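// Widen a sub-dword uniform (constant or invariant-global) load to a full
// 32-bit load, since scalar memory instructions operate on whole dwords; the
// extra bits are masked or extended away afterwards.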
| 7818 | SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const { |
| 7819 | SelectionDAG &DAG = DCI.DAG; |
| 7820 | if (Ld->getAlignment() < 4 || Ld->isDivergent()) |
| 7821 | return SDValue(); |
| 7822 | |
| 7823 | // FIXME: Constant loads should all be marked invariant. |
| 7824 | unsigned AS = Ld->getAddressSpace(); |
| 7825 | if (AS != AMDGPUAS::CONSTANT_ADDRESS && |
| 7826 | AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT && |
| 7827 | (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant())) |
| 7828 | return SDValue(); |
| 7829 | |
| 7830 | // Don't do this early, since it may interfere with adjacent load merging for |
| 7831 | // illegal types. We can avoid losing alignment information for exotic types |
| 7832 | // pre-legalize. |
| 7833 | EVT MemVT = Ld->getMemoryVT(); |
| 7834 | if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) || |
| 7835 | MemVT.getSizeInBits() >= 32) |
| 7836 | return SDValue(); |
| 7837 | |
| 7838 | SDLoc SL(Ld); |
| 7839 | |
| 7840 | assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) && |
| 7841 | "unexpected vector extload" ); |
| 7842 | |
| 7843 | // TODO: Drop only high part of range. |
| 7844 | SDValue Ptr = Ld->getBasePtr(); |
| 7845 | SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, |
| 7846 | MVT::i32, SL, Ld->getChain(), Ptr, |
| 7847 | Ld->getOffset(), |
| 7848 | Ld->getPointerInfo(), MVT::i32, |
| 7849 | Ld->getAlignment(), |
| 7850 | Ld->getMemOperand()->getFlags(), |
| 7851 | Ld->getAAInfo(), |
| 7852 | nullptr); // Drop ranges |
| 7853 | |
| 7854 | EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()); |
| 7855 | if (MemVT.isFloatingPoint()) { |
| 7856 | assert(Ld->getExtensionType() == ISD::NON_EXTLOAD && |
| 7857 | "unexpected fp extload" ); |
| 7858 | TruncVT = MemVT.changeTypeToInteger(); |
| 7859 | } |
| 7860 | |
| 7861 | SDValue Cvt = NewLoad; |
| 7862 | if (Ld->getExtensionType() == ISD::SEXTLOAD) { |
| 7863 | Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad, |
| 7864 | DAG.getValueType(TruncVT)); |
| 7865 | } else if (Ld->getExtensionType() == ISD::ZEXTLOAD || |
| 7866 | Ld->getExtensionType() == ISD::NON_EXTLOAD) { |
| 7867 | Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT); |
| 7868 | } else { |
| 7869 | assert(Ld->getExtensionType() == ISD::EXTLOAD); |
| 7870 | } |
| 7871 | |
| 7872 | EVT VT = Ld->getValueType(0); |
| 7873 | EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); |
| 7874 | |
| 7875 | DCI.AddToWorklist(Cvt.getNode()); |
| 7876 | |
| 7877 | // We may need to handle exotic cases, such as i16->i64 extloads, so insert |
| 7878 | // the appropriate extension from the 32-bit load. |
| 7879 | Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT); |
| 7880 | DCI.AddToWorklist(Cvt.getNode()); |
| 7881 | |
| 7882 | // Handle conversion back to floating point if necessary. |
| 7883 | Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt); |
| 7884 | |
| 7885 | return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL); |
| 7886 | } |
| 7887 | |
| 7888 | SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { |
| 7889 | SDLoc DL(Op); |
| 7890 | LoadSDNode *Load = cast<LoadSDNode>(Op); |
| 7891 | ISD::LoadExtType ExtType = Load->getExtensionType(); |
| 7892 | EVT MemVT = Load->getMemoryVT(); |
| 7893 | |
| 7894 | if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { |
| 7895 | if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) |
| 7896 | return SDValue(); |
| 7897 | |
| 7898 | // FIXME: Copied from PPC |
    // First, load into 32 bits, then truncate to the final, smaller type.
| 7900 | |
| 7901 | SDValue Chain = Load->getChain(); |
| 7902 | SDValue BasePtr = Load->getBasePtr(); |
| 7903 | MachineMemOperand *MMO = Load->getMemOperand(); |
| 7904 | |
| 7905 | EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16; |
| 7906 | |
| 7907 | SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, |
| 7908 | BasePtr, RealMemVT, MMO); |
| 7909 | |
| 7910 | if (!MemVT.isVector()) { |
| 7911 | SDValue Ops[] = { |
| 7912 | DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), |
| 7913 | NewLD.getValue(1) |
| 7914 | }; |
| 7915 | |
| 7916 | return DAG.getMergeValues(Ops, DL); |
| 7917 | } |
| 7918 | |
| 7919 | SmallVector<SDValue, 3> Elts; |
| 7920 | for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) { |
| 7921 | SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD, |
| 7922 | DAG.getConstant(I, DL, MVT::i32)); |
| 7923 | |
| 7924 | Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt)); |
| 7925 | } |
| 7926 | |
| 7927 | SDValue Ops[] = { |
| 7928 | DAG.getBuildVector(MemVT, DL, Elts), |
| 7929 | NewLD.getValue(1) |
| 7930 | }; |
| 7931 | |
| 7932 | return DAG.getMergeValues(Ops, DL); |
| 7933 | } |
| 7934 | |
| 7935 | if (!MemVT.isVector()) |
| 7936 | return SDValue(); |
| 7937 | |
| 7938 | assert(Op.getValueType().getVectorElementType() == MVT::i32 && |
| 7939 | "Custom lowering for non-i32 vectors hasn't been implemented." ); |
| 7940 | |
| 7941 | unsigned Alignment = Load->getAlignment(); |
| 7942 | unsigned AS = Load->getAddressSpace(); |
| 7943 | if (Subtarget->hasLDSMisalignedBug() && |
| 7944 | AS == AMDGPUAS::FLAT_ADDRESS && |
| 7945 | Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) { |
| 7946 | return SplitVectorLoad(Op, DAG); |
| 7947 | } |
| 7948 | |
| 7949 | MachineFunction &MF = DAG.getMachineFunction(); |
| 7950 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
| 7953 | if (AS == AMDGPUAS::FLAT_ADDRESS && |
| 7954 | !Subtarget->hasMultiDwordFlatScratchAddressing()) |
| 7955 | AS = MFI->hasFlatScratchInit() ? |
| 7956 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; |
| 7957 | |
| 7958 | unsigned NumElements = MemVT.getVectorNumElements(); |
| 7959 | |
| 7960 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || |
| 7961 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) { |
| 7962 | if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) { |
| 7963 | if (MemVT.isPow2VectorType()) |
| 7964 | return SDValue(); |
| 7965 | return WidenOrSplitVectorLoad(Op, DAG); |
| 7966 | } |
| 7967 | // Non-uniform loads will be selected to MUBUF instructions, so they |
| 7968 | // have the same legalization requirements as global and private |
| 7969 | // loads. |
| 7970 | // |
| 7971 | } |
| 7972 | |
| 7973 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || |
| 7974 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || |
| 7975 | AS == AMDGPUAS::GLOBAL_ADDRESS) { |
| 7976 | if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() && |
| 7977 | Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) && |
| 7978 | Alignment >= 4 && NumElements < 32) { |
| 7979 | if (MemVT.isPow2VectorType()) |
| 7980 | return SDValue(); |
| 7981 | return WidenOrSplitVectorLoad(Op, DAG); |
| 7982 | } |
| 7983 | // Non-uniform loads will be selected to MUBUF instructions, so they |
| 7984 | // have the same legalization requirements as global and private |
| 7985 | // loads. |
| 7986 | // |
| 7987 | } |
| 7988 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || |
| 7989 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || |
| 7990 | AS == AMDGPUAS::GLOBAL_ADDRESS || |
| 7991 | AS == AMDGPUAS::FLAT_ADDRESS) { |
| 7992 | if (NumElements > 4) |
| 7993 | return SplitVectorLoad(Op, DAG); |
| 7994 | // v3 loads not supported on SI. |
| 7995 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) |
| 7996 | return WidenOrSplitVectorLoad(Op, DAG); |
| 7997 | |
| 7998 | // v3 and v4 loads are supported for private and global memory. |
| 7999 | return SDValue(); |
| 8000 | } |
| 8001 | if (AS == AMDGPUAS::PRIVATE_ADDRESS) { |
| 8002 | // Depending on the setting of the private_element_size field in the |
| 8003 | // resource descriptor, we can only make private accesses up to a certain |
| 8004 | // size. |
| 8005 | switch (Subtarget->getMaxPrivateElementSize()) { |
| 8006 | case 4: { |
| 8007 | SDValue Ops[2]; |
| 8008 | std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG); |
| 8009 | return DAG.getMergeValues(Ops, DL); |
| 8010 | } |
| 8011 | case 8: |
| 8012 | if (NumElements > 2) |
| 8013 | return SplitVectorLoad(Op, DAG); |
| 8014 | return SDValue(); |
| 8015 | case 16: |
| 8016 | // Same as global/flat |
| 8017 | if (NumElements > 4) |
| 8018 | return SplitVectorLoad(Op, DAG); |
| 8019 | // v3 loads not supported on SI. |
| 8020 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) |
| 8021 | return WidenOrSplitVectorLoad(Op, DAG); |
| 8022 | |
| 8023 | return SDValue(); |
| 8024 | default: |
| 8025 | llvm_unreachable("unsupported private_element_size" ); |
| 8026 | } |
| 8027 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { |
| 8028 | // Use ds_read_b128 or ds_read_b96 when possible. |
| 8029 | if (Subtarget->hasDS96AndDS128() && |
| 8030 | ((Subtarget->useDS128() && MemVT.getStoreSize() == 16) || |
| 8031 | MemVT.getStoreSize() == 12) && |
| 8032 | allowsMisalignedMemoryAccessesImpl(MemVT.getSizeInBits(), AS, |
| 8033 | Load->getAlign())) |
| 8034 | return SDValue(); |
| 8035 | |
| 8036 | if (NumElements > 2) |
| 8037 | return SplitVectorLoad(Op, DAG); |
| 8038 | |
    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
| 8040 | // address is negative, then the instruction is incorrectly treated as |
| 8041 | // out-of-bounds even if base + offsets is in bounds. Split vectorized |
| 8042 | // loads here to avoid emitting ds_read2_b32. We may re-combine the |
| 8043 | // load later in the SILoadStoreOptimizer. |
| 8044 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && |
| 8045 | NumElements == 2 && MemVT.getStoreSize() == 8 && |
| 8046 | Load->getAlignment() < 8) { |
| 8047 | return SplitVectorLoad(Op, DAG); |
| 8048 | } |
| 8049 | } |
| 8050 | |
| 8051 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), |
| 8052 | MemVT, *Load->getMemOperand())) { |
| 8053 | SDValue Ops[2]; |
| 8054 | std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); |
| 8055 | return DAG.getMergeValues(Ops, DL); |
| 8056 | } |
| 8057 | |
| 8058 | return SDValue(); |
| 8059 | } |
| 8060 | |
| 8061 | SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
| 8062 | EVT VT = Op.getValueType(); |
| 8063 | assert(VT.getSizeInBits() == 64); |
| 8064 | |
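  // Split the 64-bit select into two 32-bit selects on the low and high
  // halves via v2i32 bitcasts; the VALU only has a 32-bit conditional move.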
| 8065 | SDLoc DL(Op); |
| 8066 | SDValue Cond = Op.getOperand(0); |
| 8067 | |
| 8068 | SDValue Zero = DAG.getConstant(0, DL, MVT::i32); |
| 8069 | SDValue One = DAG.getConstant(1, DL, MVT::i32); |
| 8070 | |
| 8071 | SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); |
| 8072 | SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); |
| 8073 | |
| 8074 | SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); |
| 8075 | SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); |
| 8076 | |
| 8077 | SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); |
| 8078 | |
| 8079 | SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); |
| 8080 | SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); |
| 8081 | |
| 8082 | SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); |
| 8083 | |
| 8084 | SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); |
| 8085 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); |
| 8086 | } |
| 8087 | |
| 8088 | // Catch division cases where we can use shortcuts with rcp and rsq |
| 8089 | // instructions. |
| 8090 | SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, |
| 8091 | SelectionDAG &DAG) const { |
| 8092 | SDLoc SL(Op); |
| 8093 | SDValue LHS = Op.getOperand(0); |
| 8094 | SDValue RHS = Op.getOperand(1); |
| 8095 | EVT VT = Op.getValueType(); |
| 8096 | const SDNodeFlags Flags = Op->getFlags(); |
| 8097 | |
| 8098 | bool AllowInaccurateRcp = Flags.hasApproximateFuncs(); |
| 8099 | |
| 8100 | // Without !fpmath accuracy information, we can't do more because we don't |
| 8101 | // know exactly whether rcp is accurate enough to meet !fpmath requirement. |
| 8102 | if (!AllowInaccurateRcp) |
| 8103 | return SDValue(); |
| 8104 | |
| 8105 | if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { |
| 8106 | if (CLHS->isExactlyValue(1.0)) { |
      // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
      // the CI documentation have a worst-case error of 1 ulp.
| 8109 | // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to |
| 8110 | // use it as long as we aren't trying to use denormals. |
| 8111 | // |
| 8112 | // v_rcp_f16 and v_rsq_f16 DO support denormals. |
| 8113 | |
| 8114 | // 1.0 / sqrt(x) -> rsq(x) |
| 8115 | |
| 8116 | // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP |
| 8117 | // error seems really high at 2^29 ULP. |
| 8118 | if (RHS.getOpcode() == ISD::FSQRT) |
| 8119 | return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); |
| 8120 | |
| 8121 | // 1.0 / x -> rcp(x) |
| 8122 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
| 8123 | } |
| 8124 | |
| 8125 | // Same as for 1.0, but expand the sign out of the constant. |
| 8126 | if (CLHS->isExactlyValue(-1.0)) { |
| 8127 | // -1.0 / x -> rcp (fneg x) |
| 8128 | SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); |
| 8129 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); |
| 8130 | } |
| 8131 | } |
| 8132 | |
| 8133 | // Turn into multiply by the reciprocal. |
| 8134 | // x / y -> x * (1.0 / y) |
| 8135 | SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
| 8136 | return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); |
| 8137 | } |
| 8138 | |
| 8139 | SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op, |
| 8140 | SelectionDAG &DAG) const { |
| 8141 | SDLoc SL(Op); |
| 8142 | SDValue X = Op.getOperand(0); |
| 8143 | SDValue Y = Op.getOperand(1); |
| 8144 | EVT VT = Op.getValueType(); |
| 8145 | const SDNodeFlags Flags = Op->getFlags(); |
| 8146 | |
| 8147 | bool AllowInaccurateDiv = Flags.hasApproximateFuncs() || |
| 8148 | DAG.getTarget().Options.UnsafeFPMath; |
| 8149 | if (!AllowInaccurateDiv) |
| 8150 | return SDValue(); |
| 8151 | |
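  // Newton-Raphson refinement of rcp(Y): two iterations of
  //   R = R + R * (1 - Y * R),
  // then the quotient estimate Q = X * R and one residual correction
  //   Q = Q + R * (X - Y * Q).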
| 8152 | SDValue NegY = DAG.getNode(ISD::FNEG, SL, VT, Y); |
| 8153 | SDValue One = DAG.getConstantFP(1.0, SL, VT); |
| 8154 | |
| 8155 | SDValue R = DAG.getNode(AMDGPUISD::RCP, SL, VT, Y); |
| 8156 | SDValue Tmp0 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One); |
| 8157 | |
| 8158 | R = DAG.getNode(ISD::FMA, SL, VT, Tmp0, R, R); |
| 8159 | SDValue Tmp1 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One); |
| 8160 | R = DAG.getNode(ISD::FMA, SL, VT, Tmp1, R, R); |
| 8161 | SDValue Ret = DAG.getNode(ISD::FMUL, SL, VT, X, R); |
| 8162 | SDValue Tmp2 = DAG.getNode(ISD::FMA, SL, VT, NegY, Ret, X); |
| 8163 | return DAG.getNode(ISD::FMA, SL, VT, Tmp2, R, Ret); |
| 8164 | } |
| 8165 | |
| 8166 | static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, |
| 8167 | EVT VT, SDValue A, SDValue B, SDValue GlueChain, |
| 8168 | SDNodeFlags Flags) { |
| 8169 | if (GlueChain->getNumValues() <= 1) { |
| 8170 | return DAG.getNode(Opcode, SL, VT, A, B, Flags); |
| 8171 | } |
| 8172 | |
| 8173 | assert(GlueChain->getNumValues() == 3); |
| 8174 | |
| 8175 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); |
| 8176 | switch (Opcode) { |
| 8177 | default: llvm_unreachable("no chain equivalent for opcode" ); |
| 8178 | case ISD::FMUL: |
| 8179 | Opcode = AMDGPUISD::FMUL_W_CHAIN; |
| 8180 | break; |
| 8181 | } |
| 8182 | |
| 8183 | return DAG.getNode(Opcode, SL, VTList, |
| 8184 | {GlueChain.getValue(1), A, B, GlueChain.getValue(2)}, |
| 8185 | Flags); |
| 8186 | } |
| 8187 | |
| 8188 | static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, |
| 8189 | EVT VT, SDValue A, SDValue B, SDValue C, |
| 8190 | SDValue GlueChain, SDNodeFlags Flags) { |
| 8191 | if (GlueChain->getNumValues() <= 1) { |
| 8192 | return DAG.getNode(Opcode, SL, VT, {A, B, C}, Flags); |
| 8193 | } |
| 8194 | |
| 8195 | assert(GlueChain->getNumValues() == 3); |
| 8196 | |
| 8197 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); |
| 8198 | switch (Opcode) { |
| 8199 | default: llvm_unreachable("no chain equivalent for opcode" ); |
| 8200 | case ISD::FMA: |
| 8201 | Opcode = AMDGPUISD::FMA_W_CHAIN; |
| 8202 | break; |
| 8203 | } |
| 8204 | |
| 8205 | return DAG.getNode(Opcode, SL, VTList, |
| 8206 | {GlueChain.getValue(1), A, B, C, GlueChain.getValue(2)}, |
| 8207 | Flags); |
| 8208 | } |
| 8209 | |
| 8210 | SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { |
| 8211 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) |
| 8212 | return FastLowered; |
| 8213 | |
| 8214 | SDLoc SL(Op); |
| 8215 | SDValue Src0 = Op.getOperand(0); |
| 8216 | SDValue Src1 = Op.getOperand(1); |
| 8217 | |
| 8218 | SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); |
| 8219 | SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); |
| 8220 | |
| 8221 | SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); |
| 8222 | SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); |
| 8223 | |
| 8224 | SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); |
| 8225 | SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); |
| 8226 | |
| 8227 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); |
| 8228 | } |
| 8229 | |
| 8230 | // Faster 2.5 ULP division that does not support denormals. |
| 8231 | SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { |
| 8232 | SDLoc SL(Op); |
| 8233 | SDValue LHS = Op.getOperand(1); |
| 8234 | SDValue RHS = Op.getOperand(2); |
| 8235 | |
| 8236 | SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); |
| 8237 | |
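  // K0 is 0x1p+96f and K1 is 0x1p-32f. If |RHS| > 2^96, scale the denominator
  // down by 2^-32 before taking its reciprocal, then multiply the quotient by
  // the same 2^-32 to compensate.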
| 8238 | const APFloat K0Val(BitsToFloat(0x6f800000)); |
| 8239 | const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); |
| 8240 | |
| 8241 | const APFloat K1Val(BitsToFloat(0x2f800000)); |
| 8242 | const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); |
| 8243 | |
| 8244 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); |
| 8245 | |
| 8246 | EVT SetCCVT = |
| 8247 | getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); |
| 8248 | |
| 8249 | SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); |
| 8250 | |
| 8251 | SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); |
| 8252 | |
| 8253 | // TODO: Should this propagate fast-math-flags? |
| 8254 | r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); |
| 8255 | |
| 8256 | // rcp does not support denormals. |
| 8257 | SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); |
| 8258 | |
| 8259 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); |
| 8260 | |
| 8261 | return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); |
| 8262 | } |
| 8263 | |
| 8264 | // Returns immediate value for setting the F32 denorm mode when using the |
| 8265 | // S_DENORM_MODE instruction. |
static SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG,
                                    const SDLoc &SL, const GCNSubtarget *ST) {
| 8268 | assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE" ); |
| 8269 | int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction()) |
| 8270 | ? FP_DENORM_FLUSH_NONE |
| 8271 | : FP_DENORM_FLUSH_IN_FLUSH_OUT; |
| 8272 | |
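  // The 4-bit denorm-mode immediate holds the f32 mode in bits [1:0] and the
  // shared f64/f16 mode in bits [3:2].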
| 8273 | int Mode = SPDenormMode | (DPDenormModeDefault << 2); |
| 8274 | return DAG.getTargetConstant(Mode, SL, MVT::i32); |
| 8275 | } |
| 8276 | |
| 8277 | SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { |
| 8278 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) |
| 8279 | return FastLowered; |
| 8280 | |
  // The selection matcher assumes anything with a chain selects to a
  // mayRaiseFPException machine instruction. Since we're introducing a chain
  // here, we need to explicitly report nofpexcept for the regular fdiv
  // lowering.
| 8285 | SDNodeFlags Flags = Op->getFlags(); |
| 8286 | Flags.setNoFPExcept(true); |
| 8287 | |
| 8288 | SDLoc SL(Op); |
| 8289 | SDValue LHS = Op.getOperand(0); |
| 8290 | SDValue RHS = Op.getOperand(1); |
| 8291 | |
| 8292 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); |
| 8293 | |
| 8294 | SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); |
| 8295 | |
| 8296 | SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, |
| 8297 | {RHS, RHS, LHS}, Flags); |
| 8298 | SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, |
| 8299 | {LHS, RHS, LHS}, Flags); |
| 8300 | |
| 8301 | // Denominator is scaled to not be denormal, so using rcp is ok. |
| 8302 | SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, |
| 8303 | DenominatorScaled, Flags); |
| 8304 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, |
| 8305 | DenominatorScaled, Flags); |
| 8306 | |
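  // Encode a MODE-register hwreg descriptor (ID_MODE, offset 4, width 2)
  // selecting the two f32 denormal-mode bits for S_SETREG_B32.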
| 8307 | const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | |
| 8308 | (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | |
| 8309 | (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); |
| 8310 | const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i32); |
| 8311 | |
| 8312 | const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction()); |
| 8313 | |
| 8314 | if (!HasFP32Denormals) { |
| 8315 | // Note we can't use the STRICT_FMA/STRICT_FMUL for the non-strict FDIV |
| 8316 | // lowering. The chain dependence is insufficient, and we need glue. We do |
| 8317 | // not need the glue variants in a strictfp function. |
| 8318 | |
| 8319 | SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); |
| 8320 | |
| 8321 | SDNode *EnableDenorm; |
| 8322 | if (Subtarget->hasDenormModeInst()) { |
| 8323 | const SDValue EnableDenormValue = |
| 8324 | getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget); |
| 8325 | |
| 8326 | EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs, |
| 8327 | DAG.getEntryNode(), EnableDenormValue).getNode(); |
| 8328 | } else { |
| 8329 | const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, |
| 8330 | SL, MVT::i32); |
| 8331 | EnableDenorm = |
| 8332 | DAG.getMachineNode(AMDGPU::S_SETREG_B32, SL, BindParamVTs, |
| 8333 | {EnableDenormValue, BitField, DAG.getEntryNode()}); |
| 8334 | } |
| 8335 | |
| 8336 | SDValue Ops[3] = { |
| 8337 | NegDivScale0, |
| 8338 | SDValue(EnableDenorm, 0), |
| 8339 | SDValue(EnableDenorm, 1) |
| 8340 | }; |
| 8341 | |
| 8342 | NegDivScale0 = DAG.getMergeValues(Ops, SL); |
| 8343 | } |
| 8344 | |
| 8345 | SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, |
| 8346 | ApproxRcp, One, NegDivScale0, Flags); |
| 8347 | |
| 8348 | SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, |
| 8349 | ApproxRcp, Fma0, Flags); |
| 8350 | |
| 8351 | SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, |
| 8352 | Fma1, Fma1, Flags); |
| 8353 | |
| 8354 | SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, |
| 8355 | NumeratorScaled, Mul, Flags); |
| 8356 | |
| 8357 | SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, |
| 8358 | Fma2, Fma1, Mul, Fma2, Flags); |
| 8359 | |
| 8360 | SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, |
| 8361 | NumeratorScaled, Fma3, Flags); |
| 8362 | |
| 8363 | if (!HasFP32Denormals) { |
| 8364 | SDNode *DisableDenorm; |
| 8365 | if (Subtarget->hasDenormModeInst()) { |
| 8366 | const SDValue DisableDenormValue = |
| 8367 | getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget); |
| 8368 | |
| 8369 | DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other, |
| 8370 | Fma4.getValue(1), DisableDenormValue, |
| 8371 | Fma4.getValue(2)).getNode(); |
| 8372 | } else { |
| 8373 | const SDValue DisableDenormValue = |
| 8374 | DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); |
| 8375 | |
| 8376 | DisableDenorm = DAG.getMachineNode( |
| 8377 | AMDGPU::S_SETREG_B32, SL, MVT::Other, |
| 8378 | {DisableDenormValue, BitField, Fma4.getValue(1), Fma4.getValue(2)}); |
| 8379 | } |
| 8380 | |
| 8381 | SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, |
| 8382 | SDValue(DisableDenorm, 0), DAG.getRoot()); |
| 8383 | DAG.setRoot(OutputChain); |
| 8384 | } |
| 8385 | |
| 8386 | SDValue Scale = NumeratorScaled.getValue(1); |
| 8387 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, |
| 8388 | {Fma4, Fma1, Fma3, Scale}, Flags); |
| 8389 | |
| 8390 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS, Flags); |
| 8391 | } |
| 8392 | |
| 8393 | SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { |
| 8394 | if (SDValue FastLowered = lowerFastUnsafeFDIV64(Op, DAG)) |
| 8395 | return FastLowered; |
| 8396 | |
| 8397 | SDLoc SL(Op); |
| 8398 | SDValue X = Op.getOperand(0); |
| 8399 | SDValue Y = Op.getOperand(1); |
| 8400 | |
| 8401 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); |
| 8402 | |
| 8403 | SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); |
| 8404 | |
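  // As in the f32 case: refine rcp(DivScale0) with two Newton-Raphson
  // iterations (Fma0..Fma3), form the quotient estimate Mul, and compute the
  // residual Fma4 = DivScale1 - DivScale0 * Mul for div_fmas/div_fixup.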
| 8405 | SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); |
| 8406 | |
| 8407 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); |
| 8408 | |
| 8409 | SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); |
| 8410 | |
| 8411 | SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); |
| 8412 | |
| 8413 | SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); |
| 8414 | |
| 8415 | SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); |
| 8416 | |
| 8417 | SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); |
| 8418 | |
| 8419 | SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); |
| 8420 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); |
| 8421 | |
| 8422 | SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, |
| 8423 | NegDivScale0, Mul, DivScale1); |
| 8424 | |
| 8425 | SDValue Scale; |
| 8426 | |
| 8427 | if (!Subtarget->hasUsableDivScaleConditionOutput()) { |
| 8428 | // Workaround a hardware bug on SI where the condition output from div_scale |
| 8429 | // is not usable. |
| 8430 | |
| 8431 | const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); |
| 8432 | |
    // Figure out which scale to use for div_fmas.
| 8434 | SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); |
| 8435 | SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); |
| 8436 | SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); |
| 8437 | SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); |
| 8438 | |
| 8439 | SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); |
| 8440 | SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); |
| 8441 | |
| 8442 | SDValue Scale0Hi |
| 8443 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); |
| 8444 | SDValue Scale1Hi |
| 8445 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); |
| 8446 | |
| 8447 | SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); |
| 8448 | SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); |
| 8449 | Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); |
| 8450 | } else { |
| 8451 | Scale = DivScale1.getValue(1); |
| 8452 | } |
| 8453 | |
| 8454 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, |
| 8455 | Fma4, Fma3, Mul, Scale); |
| 8456 | |
| 8457 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); |
| 8458 | } |
| 8459 | |
| 8460 | SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { |
| 8461 | EVT VT = Op.getValueType(); |
| 8462 | |
| 8463 | if (VT == MVT::f32) |
| 8464 | return LowerFDIV32(Op, DAG); |
| 8465 | |
| 8466 | if (VT == MVT::f64) |
| 8467 | return LowerFDIV64(Op, DAG); |
| 8468 | |
| 8469 | if (VT == MVT::f16) |
| 8470 | return LowerFDIV16(Op, DAG); |
| 8471 | |
| 8472 | llvm_unreachable("Unexpected type for fdiv" ); |
| 8473 | } |
| 8474 | |
| 8475 | SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { |
| 8476 | SDLoc DL(Op); |
| 8477 | StoreSDNode *Store = cast<StoreSDNode>(Op); |
| 8478 | EVT VT = Store->getMemoryVT(); |
| 8479 | |
| 8480 | if (VT == MVT::i1) { |
| 8481 | return DAG.getTruncStore(Store->getChain(), DL, |
| 8482 | DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), |
| 8483 | Store->getBasePtr(), MVT::i1, Store->getMemOperand()); |
| 8484 | } |
| 8485 | |
| 8486 | assert(VT.isVector() && |
| 8487 | Store->getValue().getValueType().getScalarType() == MVT::i32); |
| 8488 | |
| 8489 | unsigned AS = Store->getAddressSpace(); |
| 8490 | if (Subtarget->hasLDSMisalignedBug() && |
| 8491 | AS == AMDGPUAS::FLAT_ADDRESS && |
| 8492 | Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) { |
| 8493 | return SplitVectorStore(Op, DAG); |
| 8494 | } |
| 8495 | |
| 8496 | MachineFunction &MF = DAG.getMachineFunction(); |
| 8497 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
| 8500 | if (AS == AMDGPUAS::FLAT_ADDRESS && |
| 8501 | !Subtarget->hasMultiDwordFlatScratchAddressing()) |
| 8502 | AS = MFI->hasFlatScratchInit() ? |
| 8503 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; |
| 8504 | |
| 8505 | unsigned NumElements = VT.getVectorNumElements(); |
| 8506 | if (AS == AMDGPUAS::GLOBAL_ADDRESS || |
| 8507 | AS == AMDGPUAS::FLAT_ADDRESS) { |
| 8508 | if (NumElements > 4) |
| 8509 | return SplitVectorStore(Op, DAG); |
| 8510 | // v3 stores not supported on SI. |
| 8511 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) |
| 8512 | return SplitVectorStore(Op, DAG); |
| 8513 | |
| 8514 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), |
| 8515 | VT, *Store->getMemOperand())) |
| 8516 | return expandUnalignedStore(Store, DAG); |
| 8517 | |
| 8518 | return SDValue(); |
| 8519 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { |
| 8520 | switch (Subtarget->getMaxPrivateElementSize()) { |
| 8521 | case 4: |
| 8522 | return scalarizeVectorStore(Store, DAG); |
| 8523 | case 8: |
| 8524 | if (NumElements > 2) |
| 8525 | return SplitVectorStore(Op, DAG); |
| 8526 | return SDValue(); |
| 8527 | case 16: |
| 8528 | if (NumElements > 4 || |
| 8529 | (NumElements == 3 && !Subtarget->enableFlatScratch())) |
| 8530 | return SplitVectorStore(Op, DAG); |
| 8531 | return SDValue(); |
| 8532 | default: |
| 8533 | llvm_unreachable("unsupported private_element_size" ); |
| 8534 | } |
| 8535 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { |
| 8536 | // Use ds_write_b128 or ds_write_b96 when possible. |
| 8537 | if (Subtarget->hasDS96AndDS128() && |
| 8538 | ((Subtarget->useDS128() && VT.getStoreSize() == 16) || |
| 8539 | (VT.getStoreSize() == 12)) && |
| 8540 | allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AS, |
| 8541 | Store->getAlign())) |
| 8542 | return SDValue(); |
| 8543 | |
| 8544 | if (NumElements > 2) |
| 8545 | return SplitVectorStore(Op, DAG); |
| 8546 | |
    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
| 8548 | // address is negative, then the instruction is incorrectly treated as |
| 8549 | // out-of-bounds even if base + offsets is in bounds. Split vectorized |
| 8550 | // stores here to avoid emitting ds_write2_b32. We may re-combine the |
| 8551 | // store later in the SILoadStoreOptimizer. |
| 8552 | if (!Subtarget->hasUsableDSOffset() && |
| 8553 | NumElements == 2 && VT.getStoreSize() == 8 && |
| 8554 | Store->getAlignment() < 8) { |
| 8555 | return SplitVectorStore(Op, DAG); |
| 8556 | } |
| 8557 | |
| 8558 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), |
| 8559 | VT, *Store->getMemOperand())) { |
| 8560 | if (VT.isVector()) |
| 8561 | return SplitVectorStore(Op, DAG); |
| 8562 | return expandUnalignedStore(Store, DAG); |
| 8563 | } |
| 8564 | |
| 8565 | return SDValue(); |
| 8566 | } else { |
| 8567 | llvm_unreachable("unhandled address space" ); |
| 8568 | } |
| 8569 | } |
| 8570 | |
| 8571 | SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { |
| 8572 | SDLoc DL(Op); |
| 8573 | EVT VT = Op.getValueType(); |
| 8574 | SDValue Arg = Op.getOperand(0); |
| 8575 | SDValue TrigVal; |
| 8576 | |
| 8577 | // Propagate fast-math flags so that the multiply we introduce can be folded |
| 8578 | // if Arg is already the result of a multiply by constant. |
| 8579 | auto Flags = Op->getFlags(); |
| 8580 | |
| 8581 | SDValue OneOver2Pi = DAG.getConstantFP(0.5 * numbers::inv_pi, DL, VT); |
| 8582 | |
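  // The hardware sin/cos take their input in turns rather than radians, so
  // scale by 1/(2*pi) first; subtargets with a reduced range additionally
  // need an explicit fract to wrap the operand into [0, 1).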
| 8583 | if (Subtarget->hasTrigReducedRange()) { |
| 8584 | SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags); |
| 8585 | TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal, Flags); |
| 8586 | } else { |
| 8587 | TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags); |
| 8588 | } |
| 8589 | |
| 8590 | switch (Op.getOpcode()) { |
| 8591 | case ISD::FCOS: |
| 8592 | return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal, Flags); |
| 8593 | case ISD::FSIN: |
| 8594 | return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal, Flags); |
| 8595 | default: |
| 8596 | llvm_unreachable("Wrong trig opcode" ); |
| 8597 | } |
| 8598 | } |
| 8599 | |
| 8600 | SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { |
| 8601 | AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); |
| 8602 | assert(AtomicNode->isCompareAndSwap()); |
| 8603 | unsigned AS = AtomicNode->getAddressSpace(); |
| 8604 | |
| 8605 | // No custom lowering required for local address space |
| 8606 | if (!AMDGPU::isFlatGlobalAddrSpace(AS)) |
| 8607 | return Op; |
| 8608 | |
  // Non-local address spaces require custom lowering for atomic compare and
  // swap; the cmp and swap operands are packed into a v2i32 (or v2i64 for the
  // _X2 variants).
| 8611 | SDLoc DL(Op); |
| 8612 | SDValue ChainIn = Op.getOperand(0); |
| 8613 | SDValue Addr = Op.getOperand(1); |
| 8614 | SDValue Old = Op.getOperand(2); |
| 8615 | SDValue New = Op.getOperand(3); |
| 8616 | EVT VT = Op.getValueType(); |
| 8617 | MVT SimpleVT = VT.getSimpleVT(); |
| 8618 | MVT VecType = MVT::getVectorVT(SimpleVT, 2); |
| 8619 | |
| 8620 | SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); |
| 8621 | SDValue Ops[] = { ChainIn, Addr, NewOld }; |
| 8622 | |
| 8623 | return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), |
| 8624 | Ops, VT, AtomicNode->getMemOperand()); |
| 8625 | } |
| 8626 | |
| 8627 | //===----------------------------------------------------------------------===// |
| 8628 | // Custom DAG optimizations |
| 8629 | //===----------------------------------------------------------------------===// |
| 8630 | |
| 8631 | SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, |
| 8632 | DAGCombinerInfo &DCI) const { |
| 8633 | EVT VT = N->getValueType(0); |
| 8634 | EVT ScalarVT = VT.getScalarType(); |
| 8635 | if (ScalarVT != MVT::f32 && ScalarVT != MVT::f16) |
| 8636 | return SDValue(); |
| 8637 | |
| 8638 | SelectionDAG &DAG = DCI.DAG; |
| 8639 | SDLoc DL(N); |
| 8640 | |
| 8641 | SDValue Src = N->getOperand(0); |
| 8642 | EVT SrcVT = Src.getValueType(); |
| 8643 | |
| 8644 | // TODO: We could try to match extracting the higher bytes, which would be |
| 8645 | // easier if i8 vectors weren't promoted to i32 vectors, particularly after |
| 8646 | // types are legalized. v4i8 -> v4f32 is probably the only case to worry |
| 8647 | // about in practice. |
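  // For example, (uint_to_fp (and x, 0xff)) becomes (cvt_f32_ubyte0 x) once
  // the high 24 bits are known to be zero.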
| 8648 | if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) { |
| 8649 | if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { |
| 8650 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, MVT::f32, Src); |
| 8651 | DCI.AddToWorklist(Cvt.getNode()); |
| 8652 | |
| 8653 | // For the f16 case, fold to a cast to f32 and then cast back to f16. |
| 8654 | if (ScalarVT != MVT::f32) { |
| 8655 | Cvt = DAG.getNode(ISD::FP_ROUND, DL, VT, Cvt, |
| 8656 | DAG.getTargetConstant(0, DL, MVT::i32)); |
| 8657 | } |
| 8658 | return Cvt; |
| 8659 | } |
| 8660 | } |
| 8661 | |
| 8662 | return SDValue(); |
| 8663 | } |
| 8664 | |
| 8665 | // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) |
| 8666 | |
| 8667 | // This is a variant of |
| 8668 | // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), |
| 8669 | // |
| 8670 | // The normal DAG combiner will do this, but only if the add has one use since |
| 8671 | // that would increase the number of instructions. |
| 8672 | // |
| 8673 | // This prevents us from seeing a constant offset that can be folded into a |
| 8674 | // memory instruction's addressing mode. If we know the resulting add offset of |
| 8675 | // a pointer can be folded into an addressing offset, we can replace the pointer |
| 8676 | // operand with the add of new constant offset. This eliminates one of the uses, |
| 8677 | // and may allow the remaining use to also be simplified. |
| 8678 | // |
| 8679 | SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, |
| 8680 | unsigned AddrSpace, |
| 8681 | EVT MemVT, |
| 8682 | DAGCombinerInfo &DCI) const { |
| 8683 | SDValue N0 = N->getOperand(0); |
| 8684 | SDValue N1 = N->getOperand(1); |
| 8685 | |
| 8686 | // We only do this to handle cases where it's profitable when there are |
| 8687 | // multiple uses of the add, so defer to the standard combine. |
| 8688 | if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || |
| 8689 | N0->hasOneUse()) |
| 8690 | return SDValue(); |
| 8691 | |
| 8692 | const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); |
| 8693 | if (!CN1) |
| 8694 | return SDValue(); |
| 8695 | |
| 8696 | const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
| 8697 | if (!CAdd) |
| 8698 | return SDValue(); |
| 8699 | |
| 8700 | // If the resulting offset is too large, we can't fold it into the addressing |
| 8701 | // mode offset. |
| 8702 | APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); |
| 8703 | Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); |
| 8704 | |
| 8705 | AddrMode AM; |
| 8706 | AM.HasBaseReg = true; |
| 8707 | AM.BaseOffs = Offset.getSExtValue(); |
| 8708 | if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) |
| 8709 | return SDValue(); |
| 8710 | |
| 8711 | SelectionDAG &DAG = DCI.DAG; |
| 8712 | SDLoc SL(N); |
| 8713 | EVT VT = N->getValueType(0); |
| 8714 | |
| 8715 | SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); |
| 8716 | SDValue COffset = DAG.getConstant(Offset, SL, VT); |
| 8717 | |
| 8718 | SDNodeFlags Flags; |
| 8719 | Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && |
| 8720 | (N0.getOpcode() == ISD::OR || |
| 8721 | N0->getFlags().hasNoUnsignedWrap())); |
| 8722 | |
| 8723 | return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); |
| 8724 | } |
| 8725 | |
/// MemSDNode::getBasePtr() does not work for intrinsics, which need an offset
/// past the chain and intrinsic ID. Theoretically we would also need to check
/// the specific intrinsic, but they all place the pointer operand first.
| 8729 | static unsigned getBasePtrIndex(const MemSDNode *N) { |
| 8730 | switch (N->getOpcode()) { |
| 8731 | case ISD::STORE: |
| 8732 | case ISD::INTRINSIC_W_CHAIN: |
| 8733 | case ISD::INTRINSIC_VOID: |
| 8734 | return 2; |
| 8735 | default: |
| 8736 | return 1; |
| 8737 | } |
| 8738 | } |
| 8739 | |
| 8740 | SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, |
| 8741 | DAGCombinerInfo &DCI) const { |
| 8742 | SelectionDAG &DAG = DCI.DAG; |
| 8743 | SDLoc SL(N); |
| 8744 | |
| 8745 | unsigned PtrIdx = getBasePtrIndex(N); |
| 8746 | SDValue Ptr = N->getOperand(PtrIdx); |
| 8747 | |
| 8748 | // TODO: We could also do this for multiplies. |
| 8749 | if (Ptr.getOpcode() == ISD::SHL) { |
| 8750 | SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(), |
| 8751 | N->getMemoryVT(), DCI); |
| 8752 | if (NewPtr) { |
| 8753 | SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); |
| 8754 | |
| 8755 | NewOps[PtrIdx] = NewPtr; |
| 8756 | return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); |
| 8757 | } |
| 8758 | } |
| 8759 | |
| 8760 | return SDValue(); |
| 8761 | } |
| 8762 | |
| 8763 | static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { |
| 8764 | return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || |
| 8765 | (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || |
| 8766 | (Opc == ISD::XOR && Val == 0); |
| 8767 | } |
| 8768 | |
// Break up a 64-bit bitwise operation with a constant into two 32-bit
// and/or/xor operations. This will typically happen anyway for a VALU 64-bit
// operation. This exposes other 32-bit integer combine opportunities since
// most 64-bit operations are decomposed this way. TODO: We won't want this
// for SALU, especially if the constant is an inline immediate.
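// For example, (and i64:x, 0x00000000ffffffff) splits into an AND of the low
// half with 0xffffffff (a no-op) and an AND of the high half with 0 (constant
// zero), so no 64-bit immediate needs to be materialized.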
| 8774 | SDValue SITargetLowering::splitBinaryBitConstantOp( |
| 8775 | DAGCombinerInfo &DCI, |
| 8776 | const SDLoc &SL, |
| 8777 | unsigned Opc, SDValue LHS, |
| 8778 | const ConstantSDNode *CRHS) const { |
| 8779 | uint64_t Val = CRHS->getZExtValue(); |
| 8780 | uint32_t ValLo = Lo_32(Val); |
| 8781 | uint32_t ValHi = Hi_32(Val); |
| 8782 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 8783 | |
| 8784 | if ((bitOpWithConstantIsReducible(Opc, ValLo) || |
| 8785 | bitOpWithConstantIsReducible(Opc, ValHi)) || |
| 8786 | (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { |
| 8787 | // If we need to materialize a 64-bit immediate, it will be split up later |
| 8788 | // anyway. Avoid creating the harder to understand 64-bit immediate |
| 8789 | // materialization. |
| 8790 | return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); |
| 8791 | } |
| 8792 | |
| 8793 | return SDValue(); |
| 8794 | } |
| 8795 | |
// Returns true if the argument is a boolean value which is not serialized
// into memory or an argument, and does not require v_cndmask_b32 to be
// deserialized.
| 8798 | static bool isBoolSGPR(SDValue V) { |
| 8799 | if (V.getValueType() != MVT::i1) |
| 8800 | return false; |
| 8801 | switch (V.getOpcode()) { |
| 8802 | default: break; |
| 8803 | case ISD::SETCC: |
| 8804 | case ISD::AND: |
| 8805 | case ISD::OR: |
| 8806 | case ISD::XOR: |
| 8807 | case AMDGPUISD::FP_CLASS: |
| 8808 | return true; |
| 8809 | } |
| 8810 | return false; |
| 8811 | } |
| 8812 | |
// If a constant has all zeroes or all ones within each byte, return it.
// Otherwise return 0.
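// For example, 0x00ff00ff is returned unchanged, while 0x00f000ff has a
// partially selected byte (0xf0) and yields 0.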
| 8815 | static uint32_t getConstantPermuteMask(uint32_t C) { |
| 8816 | // 0xff for any zero byte in the mask |
| 8817 | uint32_t ZeroByteMask = 0; |
| 8818 | if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff; |
| 8819 | if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00; |
| 8820 | if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000; |
| 8821 | if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000; |
| 8822 | uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte |
| 8823 | if ((NonZeroByteMask & C) != NonZeroByteMask) |
| 8824 | return 0; // Partial bytes selected. |
| 8825 | return C; |
| 8826 | } |
| 8827 | |
| 8828 | // Check if a node selects whole bytes from its operand 0 starting at a byte |
// boundary while masking the rest. Returns the select mask as used by
// v_perm_b32, or ~0 if the match fails.
| 8831 | // Note byte select encoding: |
| 8832 | // value 0-3 selects corresponding source byte; |
| 8833 | // value 0xc selects zero; |
| 8834 | // value 0xff selects 0xff. |
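// For example, (and x, 0x0000ffff) yields the mask 0x0c0c0100: zero in the
// two high bytes, bytes 1 and 0 of the source in the low ones.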
| 8835 | static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) { |
| 8836 | assert(V.getValueSizeInBits() == 32); |
| 8837 | |
| 8838 | if (V.getNumOperands() != 2) |
| 8839 | return ~0; |
| 8840 | |
| 8841 | ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1)); |
| 8842 | if (!N1) |
| 8843 | return ~0; |
| 8844 | |
| 8845 | uint32_t C = N1->getZExtValue(); |
| 8846 | |
| 8847 | switch (V.getOpcode()) { |
| 8848 | default: |
| 8849 | break; |
| 8850 | case ISD::AND: |
| 8851 | if (uint32_t ConstMask = getConstantPermuteMask(C)) { |
| 8852 | return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask); |
| 8853 | } |
| 8854 | break; |
| 8855 | |
| 8856 | case ISD::OR: |
| 8857 | if (uint32_t ConstMask = getConstantPermuteMask(C)) { |
| 8858 | return (0x03020100 & ~ConstMask) | ConstMask; |
| 8859 | } |
| 8860 | break; |
| 8861 | |
| 8862 | case ISD::SHL: |
| 8863 | if (C % 8) |
| 8864 | return ~0; |
| 8865 | |
| 8866 | return uint32_t((0x030201000c0c0c0cull << C) >> 32); |
| 8867 | |
| 8868 | case ISD::SRL: |
| 8869 | if (C % 8) |
| 8870 | return ~0; |
| 8871 | |
| 8872 | return uint32_t(0x0c0c0c0c03020100ull >> C); |
| 8873 | } |
| 8874 | |
| 8875 | return ~0; |
| 8876 | } |
| 8877 | |
| 8878 | SDValue SITargetLowering::performAndCombine(SDNode *N, |
| 8879 | DAGCombinerInfo &DCI) const { |
| 8880 | if (DCI.isBeforeLegalize()) |
| 8881 | return SDValue(); |
| 8882 | |
| 8883 | SelectionDAG &DAG = DCI.DAG; |
| 8884 | EVT VT = N->getValueType(0); |
| 8885 | SDValue LHS = N->getOperand(0); |
  SDValue RHS = N->getOperand(1);

| 8889 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); |
| 8890 | if (VT == MVT::i64 && CRHS) { |
| 8891 | if (SDValue Split |
| 8892 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) |
| 8893 | return Split; |
| 8894 | } |
| 8895 | |
| 8896 | if (CRHS && VT == MVT::i32) { |
| 8897 | // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb |
| 8898 | // nb = number of trailing zeroes in mask |
| 8899 | // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass, |
    // given that we are selecting 8 or 16 bit fields starting at a byte
    // boundary.
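    // For example, (and (srl x, 3), 0x1fe0) has Bits = 8, NB = 5 and
    // Shift = 3, so Offset = 8 and the result is (shl (bfe x, 8, 8), 5).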
| 8901 | uint64_t Mask = CRHS->getZExtValue(); |
| 8902 | unsigned Bits = countPopulation(Mask); |
| 8903 | if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && |
| 8904 | (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { |
| 8905 | if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { |
| 8906 | unsigned Shift = CShift->getZExtValue(); |
| 8907 | unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); |
| 8908 | unsigned Offset = NB + Shift; |
| 8909 | if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. |
| 8910 | SDLoc SL(N); |
| 8911 | SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, |
| 8912 | LHS->getOperand(0), |
| 8913 | DAG.getConstant(Offset, SL, MVT::i32), |
| 8914 | DAG.getConstant(Bits, SL, MVT::i32)); |
| 8915 | EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); |
| 8916 | SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, |
| 8917 | DAG.getValueType(NarrowVT)); |
| 8918 | SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, |
| 8919 | DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); |
| 8920 | return Shl; |
| 8921 | } |
| 8922 | } |
| 8923 | } |
| 8924 | |
| 8925 | // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) |
| 8926 | if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM && |
| 8927 | isa<ConstantSDNode>(LHS.getOperand(2))) { |
| 8928 | uint32_t Sel = getConstantPermuteMask(Mask); |
| 8929 | if (!Sel) |
| 8930 | return SDValue(); |
| 8931 | |
| 8932 | // Select 0xc for all zero bytes |
| 8933 | Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c); |
| 8934 | SDLoc DL(N); |
| 8935 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), |
| 8936 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); |
| 8937 | } |
| 8938 | } |
| 8939 | |
| 8940 | // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> |
| 8941 | // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) |
| 8942 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { |
| 8943 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
| 8944 | ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); |
| 8945 | |
| 8946 | SDValue X = LHS.getOperand(0); |
| 8947 | SDValue Y = RHS.getOperand(0); |
| 8948 | if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) |
| 8949 | return SDValue(); |
| 8950 | |
| 8951 | if (LCC == ISD::SETO) { |
| 8952 | if (X != LHS.getOperand(1)) |
| 8953 | return SDValue(); |
| 8954 | |
| 8955 | if (RCC == ISD::SETUNE) { |
| 8956 | const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); |
| 8957 | if (!C1 || !C1->isInfinity() || C1->isNegative()) |
| 8958 | return SDValue(); |
| 8959 | |
| 8960 | const uint32_t Mask = SIInstrFlags::N_NORMAL | |
| 8961 | SIInstrFlags::N_SUBNORMAL | |
| 8962 | SIInstrFlags::N_ZERO | |
| 8963 | SIInstrFlags::P_ZERO | |
| 8964 | SIInstrFlags::P_SUBNORMAL | |
| 8965 | SIInstrFlags::P_NORMAL; |
| 8966 | |
| 8967 | static_assert(((~(SIInstrFlags::S_NAN | |
| 8968 | SIInstrFlags::Q_NAN | |
| 8969 | SIInstrFlags::N_INFINITY | |
| 8970 | SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, |
| 8971 | "mask not equal" ); |
| 8972 | |
| 8973 | SDLoc DL(N); |
| 8974 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
| 8975 | X, DAG.getConstant(Mask, DL, MVT::i32)); |
| 8976 | } |
| 8977 | } |
| 8978 | } |
| 8979 | |
| 8980 | if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS) |
| 8981 | std::swap(LHS, RHS); |
| 8982 | |
| 8983 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS && |
| 8984 | RHS.hasOneUse()) { |
| 8985 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
| 8986 | // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan) |
| 8987 | // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan) |
| 8988 | const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
| 8989 | if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask && |
| 8990 | (RHS.getOperand(0) == LHS.getOperand(0) && |
| 8991 | LHS.getOperand(0) == LHS.getOperand(1))) { |
| 8992 | const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN; |
| 8993 | unsigned NewMask = LCC == ISD::SETO ? |
| 8994 | Mask->getZExtValue() & ~OrdMask : |
| 8995 | Mask->getZExtValue() & OrdMask; |
| 8996 | |
| 8997 | SDLoc DL(N); |
| 8998 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0), |
| 8999 | DAG.getConstant(NewMask, DL, MVT::i32)); |
| 9000 | } |
| 9001 | } |
| 9002 | |
| 9003 | if (VT == MVT::i32 && |
| 9004 | (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) { |
| 9005 | // and x, (sext cc from i1) => select cc, x, 0 |
| 9006 | if (RHS.getOpcode() != ISD::SIGN_EXTEND) |
| 9007 | std::swap(LHS, RHS); |
| 9008 | if (isBoolSGPR(RHS.getOperand(0))) |
| 9009 | return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0), |
| 9010 | LHS, DAG.getConstant(0, SDLoc(N), MVT::i32)); |
| 9011 | } |
| 9012 | |
| 9013 | // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) |
| 9014 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 9015 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && |
| 9016 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) { |
| 9017 | uint32_t LHSMask = getPermuteMask(DAG, LHS); |
| 9018 | uint32_t RHSMask = getPermuteMask(DAG, RHS); |
| 9019 | if (LHSMask != ~0u && RHSMask != ~0u) { |
| 9020 | // Canonicalize the expression in an attempt to have fewer unique masks |
| 9021 | // and therefore fewer registers used to hold the masks. |
| 9022 | if (LHSMask > RHSMask) { |
| 9023 | std::swap(LHSMask, RHSMask); |
| 9024 | std::swap(LHS, RHS); |
| 9025 | } |
| 9026 | |
      // Mark with 0xc each byte lane that selects an actual lane from the
      // source operand. In the mask, a zeroed byte is 0xc, a 0xff byte is
      // 0xff, and real lane selectors are in the 0-3 range.
| 9029 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
| 9030 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
| 9031 | |
      // Check if we need to combine values from two sources within a byte.
      if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high word from one source and the low word from
          // the other, keep it for SDWA.
          // TODO: teach SDWA to work with v_perm_b32 and remove the check.
          !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Each byte in each mask is either a lane selector (0-3), or has
        // higher bits set: 0xff selects the constant 0xff and 0x0c selects
        // zero. If 0x0c appears in either mask the result byte must be 0x0c;
        // otherwise the mask that is not 0xff wins. ANDing both masks gives
        // the correct result except for the bytes where one side selected
        // zero, which must be forced back to exactly 0x0c.
        uint32_t Mask = LHSMask & RHSMask;
        for (unsigned I = 0; I < 32; I += 8) {
          uint32_t ByteSel = 0xffu << I;
          if ((LHSMask & ByteSel) == (0x0cu << I) ||
              (RHSMask & ByteSel) == (0x0cu << I))
            Mask = (Mask & ~ByteSel) | (0x0cu << I);
        }
| 9048 | |
| 9049 | // Add 4 to each active LHS lane. It will not affect any existing 0xff |
| 9050 | // or 0x0c. |
| 9051 | uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404); |
| 9052 | SDLoc DL(N); |
| 9053 | |
| 9054 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, |
| 9055 | LHS.getOperand(0), RHS.getOperand(0), |
| 9056 | DAG.getConstant(Sel, DL, MVT::i32)); |
| 9057 | } |
| 9058 | } |
| 9059 | } |
| 9060 | |
| 9061 | return SDValue(); |
| 9062 | } |
| 9063 | |
| 9064 | SDValue SITargetLowering::performOrCombine(SDNode *N, |
| 9065 | DAGCombinerInfo &DCI) const { |
| 9066 | SelectionDAG &DAG = DCI.DAG; |
| 9067 | SDValue LHS = N->getOperand(0); |
| 9068 | SDValue RHS = N->getOperand(1); |
| 9069 | |
| 9070 | EVT VT = N->getValueType(0); |
| 9071 | if (VT == MVT::i1) { |
| 9072 | // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) |
| 9073 | if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && |
| 9074 | RHS.getOpcode() == AMDGPUISD::FP_CLASS) { |
| 9075 | SDValue Src = LHS.getOperand(0); |
| 9076 | if (Src != RHS.getOperand(0)) |
| 9077 | return SDValue(); |
| 9078 | |
| 9079 | const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); |
| 9080 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
| 9081 | if (!CLHS || !CRHS) |
| 9082 | return SDValue(); |
| 9083 | |
| 9084 | // Only 10 bits are used. |
| 9085 | static const uint32_t MaxMask = 0x3ff; |
| 9086 | |
| 9087 | uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; |
| 9088 | SDLoc DL(N); |
| 9089 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
| 9090 | Src, DAG.getConstant(NewMask, DL, MVT::i32)); |
| 9091 | } |
| 9092 | |
| 9093 | return SDValue(); |
| 9094 | } |
| 9095 | |
| 9096 | // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) |
| 9097 | if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() && |
| 9098 | LHS.getOpcode() == AMDGPUISD::PERM && |
| 9099 | isa<ConstantSDNode>(LHS.getOperand(2))) { |
| 9100 | uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1)); |
| 9101 | if (!Sel) |
| 9102 | return SDValue(); |
| 9103 | |
| 9104 | Sel |= LHS.getConstantOperandVal(2); |
| 9105 | SDLoc DL(N); |
| 9106 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), |
| 9107 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); |
| 9108 | } |
| 9109 | |
| 9110 | // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) |
| 9111 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 9112 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && |
| 9113 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) { |
| 9114 | uint32_t LHSMask = getPermuteMask(DAG, LHS); |
| 9115 | uint32_t RHSMask = getPermuteMask(DAG, RHS); |
| 9116 | if (LHSMask != ~0u && RHSMask != ~0u) { |
| 9117 | // Canonicalize the expression in an attempt to have fewer unique masks |
| 9118 | // and therefore fewer registers used to hold the masks. |
| 9119 | if (LHSMask > RHSMask) { |
| 9120 | std::swap(LHSMask, RHSMask); |
| 9121 | std::swap(LHS, RHS); |
| 9122 | } |
| 9123 | |
      // Select 0x0c for each lane used from the source operand: in the mask,
      // a zero byte is encoded as 0x0c, a 0xff byte as 0xff, and actual lanes
      // are in the 0-3 range.
| 9126 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
| 9127 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
| 9128 | |
      // Check if we need to combine values from two sources within a byte.
| 9130 | if (!(LHSUsedLanes & RHSUsedLanes) && |
          // If we select the high word from one source and the low word from
          // the other, keep it for SDWA.
| 9132 | // TODO: teach SDWA to work with v_perm_b32 and remove the check. |
| 9133 | !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { |
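        // Worked example with illustrative masks: LHSMask = 0x030c010c and
        // RHSMask = 0x0c020c00 give LHSUsedLanes = 0x0c000c00 and
        // RHSUsedLanes = 0x000c000c, which are disjoint. Killing the bytes
        // owned by the other mask leaves 0x03000100 and 0x00020000; biasing
        // the active LHS lanes by 4 then gives Sel = 0x07020500.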
        // Kill zero bytes selected by the other mask. The zero selector value
        // is 0x0c.
| 9135 | LHSMask &= ~RHSUsedLanes; |
| 9136 | RHSMask &= ~LHSUsedLanes; |
| 9137 | // Add 4 to each active LHS lane |
| 9138 | LHSMask |= LHSUsedLanes & 0x04040404; |
| 9139 | // Combine masks |
| 9140 | uint32_t Sel = LHSMask | RHSMask; |
| 9141 | SDLoc DL(N); |
| 9142 | |
| 9143 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, |
| 9144 | LHS.getOperand(0), RHS.getOperand(0), |
| 9145 | DAG.getConstant(Sel, DL, MVT::i32)); |
| 9146 | } |
| 9147 | } |
| 9148 | } |
| 9149 | |
| 9150 | if (VT != MVT::i64 || DCI.isBeforeLegalizeOps()) |
| 9151 | return SDValue(); |
| 9152 | |
| 9153 | // TODO: This could be a generic combine with a predicate for extracting the |
| 9154 | // high half of an integer being free. |
| 9155 | |
| 9156 | // (or i64:x, (zero_extend i32:y)) -> |
| 9157 | // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) |
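  // The zero_extend guarantees the high 32 bits of the RHS are zero, so only
  // the low halves need to be ORed and the high half of x passes through
  // unchanged.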
| 9158 | if (LHS.getOpcode() == ISD::ZERO_EXTEND && |
| 9159 | RHS.getOpcode() != ISD::ZERO_EXTEND) |
| 9160 | std::swap(LHS, RHS); |
| 9161 | |
| 9162 | if (RHS.getOpcode() == ISD::ZERO_EXTEND) { |
| 9163 | SDValue ExtSrc = RHS.getOperand(0); |
| 9164 | EVT SrcVT = ExtSrc.getValueType(); |
| 9165 | if (SrcVT == MVT::i32) { |
| 9166 | SDLoc SL(N); |
| 9167 | SDValue LowLHS, HiBits; |
| 9168 | std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); |
| 9169 | SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); |
| 9170 | |
| 9171 | DCI.AddToWorklist(LowOr.getNode()); |
| 9172 | DCI.AddToWorklist(HiBits.getNode()); |
| 9173 | |
| 9174 | SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, |
| 9175 | LowOr, HiBits); |
| 9176 | return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); |
| 9177 | } |
| 9178 | } |
| 9179 | |
| 9180 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 9181 | if (CRHS) { |
| 9182 | if (SDValue Split |
| 9183 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) |
| 9184 | return Split; |
| 9185 | } |
| 9186 | |
| 9187 | return SDValue(); |
| 9188 | } |
| 9189 | |
| 9190 | SDValue SITargetLowering::performXorCombine(SDNode *N, |
| 9191 | DAGCombinerInfo &DCI) const { |
| 9192 | EVT VT = N->getValueType(0); |
| 9193 | if (VT != MVT::i64) |
| 9194 | return SDValue(); |
| 9195 | |
| 9196 | SDValue LHS = N->getOperand(0); |
| 9197 | SDValue RHS = N->getOperand(1); |
| 9198 | |
| 9199 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); |
| 9200 | if (CRHS) { |
| 9201 | if (SDValue Split |
| 9202 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) |
| 9203 | return Split; |
| 9204 | } |
| 9205 | |
| 9206 | return SDValue(); |
| 9207 | } |
| 9208 | |
| 9209 | // Instructions that will be lowered with a final instruction that zeros the |
| 9210 | // high result bits. |
| 9211 | // XXX - probably only need to list legal operations. |
| 9212 | static bool fp16SrcZerosHighBits(unsigned Opc) { |
| 9213 | switch (Opc) { |
| 9214 | case ISD::FADD: |
| 9215 | case ISD::FSUB: |
| 9216 | case ISD::FMUL: |
| 9217 | case ISD::FDIV: |
| 9218 | case ISD::FREM: |
| 9219 | case ISD::FMA: |
| 9220 | case ISD::FMAD: |
| 9221 | case ISD::FCANONICALIZE: |
| 9222 | case ISD::FP_ROUND: |
| 9223 | case ISD::UINT_TO_FP: |
| 9224 | case ISD::SINT_TO_FP: |
| 9225 | case ISD::FABS: |
| 9226 | // Fabs is lowered to a bit operation, but it's an and which will clear the |
| 9227 | // high bits anyway. |
| 9228 | case ISD::FSQRT: |
| 9229 | case ISD::FSIN: |
| 9230 | case ISD::FCOS: |
| 9231 | case ISD::FPOWI: |
| 9232 | case ISD::FPOW: |
| 9233 | case ISD::FLOG: |
| 9234 | case ISD::FLOG2: |
| 9235 | case ISD::FLOG10: |
| 9236 | case ISD::FEXP: |
| 9237 | case ISD::FEXP2: |
| 9238 | case ISD::FCEIL: |
| 9239 | case ISD::FTRUNC: |
| 9240 | case ISD::FRINT: |
| 9241 | case ISD::FNEARBYINT: |
| 9242 | case ISD::FROUND: |
| 9243 | case ISD::FFLOOR: |
| 9244 | case ISD::FMINNUM: |
| 9245 | case ISD::FMAXNUM: |
| 9246 | case AMDGPUISD::FRACT: |
| 9247 | case AMDGPUISD::CLAMP: |
| 9248 | case AMDGPUISD::COS_HW: |
| 9249 | case AMDGPUISD::SIN_HW: |
| 9250 | case AMDGPUISD::FMIN3: |
| 9251 | case AMDGPUISD::FMAX3: |
| 9252 | case AMDGPUISD::FMED3: |
| 9253 | case AMDGPUISD::FMAD_FTZ: |
| 9254 | case AMDGPUISD::RCP: |
| 9255 | case AMDGPUISD::RSQ: |
| 9256 | case AMDGPUISD::RCP_IFLAG: |
| 9257 | case AMDGPUISD::LDEXP: |
| 9258 | return true; |
| 9259 | default: |
| 9260 | // fcopysign, select and others may be lowered to 32-bit bit operations |
| 9261 | // which don't zero the high bits. |
| 9262 | return false; |
| 9263 | } |
| 9264 | } |
| 9265 | |
| 9266 | SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, |
| 9267 | DAGCombinerInfo &DCI) const { |
| 9268 | if (!Subtarget->has16BitInsts() || |
| 9269 | DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
| 9270 | return SDValue(); |
| 9271 | |
| 9272 | EVT VT = N->getValueType(0); |
| 9273 | if (VT != MVT::i32) |
| 9274 | return SDValue(); |
| 9275 | |
| 9276 | SDValue Src = N->getOperand(0); |
| 9277 | if (Src.getValueType() != MVT::i16) |
| 9278 | return SDValue(); |
| 9279 | |
| 9280 | // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src |
| 9281 | // FIXME: It is not universally true that the high bits are zeroed on gfx9. |
| 9282 | if (Src.getOpcode() == ISD::BITCAST) { |
| 9283 | SDValue BCSrc = Src.getOperand(0); |
| 9284 | if (BCSrc.getValueType() == MVT::f16 && |
| 9285 | fp16SrcZerosHighBits(BCSrc.getOpcode())) |
| 9286 | return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc); |
| 9287 | } |
| 9288 | |
| 9289 | return SDValue(); |
| 9290 | } |
| 9291 | |
| 9292 | SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N, |
| 9293 | DAGCombinerInfo &DCI) |
| 9294 | const { |
| 9295 | SDValue Src = N->getOperand(0); |
| 9296 | auto *VTSign = cast<VTSDNode>(N->getOperand(1)); |
| 9297 | |
| 9298 | if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE && |
| 9299 | VTSign->getVT() == MVT::i8) || |
| 9300 | (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT && |
| 9301 | VTSign->getVT() == MVT::i16)) && |
| 9302 | Src.hasOneUse()) { |
| 9303 | auto *M = cast<MemSDNode>(Src); |
| 9304 | SDValue Ops[] = { |
| 9305 | Src.getOperand(0), // Chain |
| 9306 | Src.getOperand(1), // rsrc |
| 9307 | Src.getOperand(2), // vindex |
| 9308 | Src.getOperand(3), // voffset |
| 9309 | Src.getOperand(4), // soffset |
| 9310 | Src.getOperand(5), // offset |
      Src.getOperand(6), // cachepolicy
      Src.getOperand(7)  // idxen
| 9313 | }; |
| 9314 | // replace with BUFFER_LOAD_BYTE/SHORT |
| 9315 | SDVTList ResList = DCI.DAG.getVTList(MVT::i32, |
| 9316 | Src.getOperand(0).getValueType()); |
| 9317 | unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ? |
| 9318 | AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT; |
| 9319 | SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N), |
| 9320 | ResList, |
| 9321 | Ops, M->getMemoryVT(), |
| 9322 | M->getMemOperand()); |
| 9323 | return DCI.DAG.getMergeValues({BufferLoadSignExt, |
| 9324 | BufferLoadSignExt.getValue(1)}, SDLoc(N)); |
| 9325 | } |
| 9326 | return SDValue(); |
| 9327 | } |
| 9328 | |
| 9329 | SDValue SITargetLowering::performClassCombine(SDNode *N, |
| 9330 | DAGCombinerInfo &DCI) const { |
| 9331 | SelectionDAG &DAG = DCI.DAG; |
| 9332 | SDValue Mask = N->getOperand(1); |
| 9333 | |
| 9334 | // fp_class x, 0 -> false |
| 9335 | if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { |
| 9336 | if (CMask->isNullValue()) |
| 9337 | return DAG.getConstant(0, SDLoc(N), MVT::i1); |
| 9338 | } |
| 9339 | |
| 9340 | if (N->getOperand(0).isUndef()) |
| 9341 | return DAG.getUNDEF(MVT::i1); |
| 9342 | |
| 9343 | return SDValue(); |
| 9344 | } |
| 9345 | |
| 9346 | SDValue SITargetLowering::performRcpCombine(SDNode *N, |
| 9347 | DAGCombinerInfo &DCI) const { |
| 9348 | EVT VT = N->getValueType(0); |
| 9349 | SDValue N0 = N->getOperand(0); |
| 9350 | |
| 9351 | if (N0.isUndef()) |
| 9352 | return N0; |
| 9353 | |
| 9354 | if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP || |
| 9355 | N0.getOpcode() == ISD::SINT_TO_FP)) { |
| 9356 | return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0, |
| 9357 | N->getFlags()); |
| 9358 | } |
| 9359 | |
| 9360 | if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) { |
| 9361 | return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT, |
| 9362 | N0.getOperand(0), N->getFlags()); |
| 9363 | } |
| 9364 | |
| 9365 | return AMDGPUTargetLowering::performRcpCombine(N, DCI); |
| 9366 | } |
| 9367 | |
| 9368 | bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op, |
| 9369 | unsigned MaxDepth) const { |
| 9370 | unsigned Opcode = Op.getOpcode(); |
| 9371 | if (Opcode == ISD::FCANONICALIZE) |
| 9372 | return true; |
| 9373 | |
| 9374 | if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) { |
| 9375 | auto F = CFP->getValueAPF(); |
| 9376 | if (F.isNaN() && F.isSignaling()) |
| 9377 | return false; |
| 9378 | return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType()); |
| 9379 | } |
| 9380 | |
  // If the source is the result of another standard FP operation, it is
  // already in canonical form.
| 9383 | if (MaxDepth == 0) |
| 9384 | return false; |
| 9385 | |
| 9386 | switch (Opcode) { |
| 9387 | // These will flush denorms if required. |
| 9388 | case ISD::FADD: |
| 9389 | case ISD::FSUB: |
| 9390 | case ISD::FMUL: |
| 9391 | case ISD::FCEIL: |
| 9392 | case ISD::FFLOOR: |
| 9393 | case ISD::FMA: |
| 9394 | case ISD::FMAD: |
| 9395 | case ISD::FSQRT: |
| 9396 | case ISD::FDIV: |
| 9397 | case ISD::FREM: |
| 9398 | case ISD::FP_ROUND: |
| 9399 | case ISD::FP_EXTEND: |
| 9400 | case AMDGPUISD::FMUL_LEGACY: |
| 9401 | case AMDGPUISD::FMAD_FTZ: |
| 9402 | case AMDGPUISD::RCP: |
| 9403 | case AMDGPUISD::RSQ: |
| 9404 | case AMDGPUISD::RSQ_CLAMP: |
| 9405 | case AMDGPUISD::RCP_LEGACY: |
| 9406 | case AMDGPUISD::RCP_IFLAG: |
| 9407 | case AMDGPUISD::DIV_SCALE: |
| 9408 | case AMDGPUISD::DIV_FMAS: |
| 9409 | case AMDGPUISD::DIV_FIXUP: |
| 9410 | case AMDGPUISD::FRACT: |
| 9411 | case AMDGPUISD::LDEXP: |
| 9412 | case AMDGPUISD::CVT_PKRTZ_F16_F32: |
| 9413 | case AMDGPUISD::CVT_F32_UBYTE0: |
| 9414 | case AMDGPUISD::CVT_F32_UBYTE1: |
| 9415 | case AMDGPUISD::CVT_F32_UBYTE2: |
| 9416 | case AMDGPUISD::CVT_F32_UBYTE3: |
| 9417 | return true; |
| 9418 | |
  // These can/will be lowered to or combined as bit operations, so we need to
  // check their inputs recursively.
| 9421 | case ISD::FNEG: |
| 9422 | case ISD::FABS: |
| 9423 | case ISD::FCOPYSIGN: |
| 9424 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); |
| 9425 | |
| 9426 | case ISD::FSIN: |
| 9427 | case ISD::FCOS: |
| 9428 | case ISD::FSINCOS: |
| 9429 | return Op.getValueType().getScalarType() != MVT::f16; |
| 9430 | |
| 9431 | case ISD::FMINNUM: |
| 9432 | case ISD::FMAXNUM: |
| 9433 | case ISD::FMINNUM_IEEE: |
| 9434 | case ISD::FMAXNUM_IEEE: |
| 9435 | case AMDGPUISD::CLAMP: |
| 9436 | case AMDGPUISD::FMED3: |
| 9437 | case AMDGPUISD::FMAX3: |
| 9438 | case AMDGPUISD::FMIN3: { |
    // FIXME: Shouldn't treat the generic operations differently based on
    // these. However, we aren't really required to flush the result from
    // minnum/maxnum.

    // sNaNs will be quieted, so we only need to worry about denormals.
| 9444 | if (Subtarget->supportsMinMaxDenormModes() || |
| 9445 | denormalsEnabledForType(DAG, Op.getValueType())) |
| 9446 | return true; |
| 9447 | |
| 9448 | // Flushing may be required. |
    // On pre-GFX9 targets, V_MIN_F32 and others do not flush denorms, so for
    // such targets we need to check their inputs recursively.
| 9451 | |
| 9452 | // FIXME: Does this apply with clamp? It's implemented with max. |
| 9453 | for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) { |
| 9454 | if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1)) |
| 9455 | return false; |
| 9456 | } |
| 9457 | |
| 9458 | return true; |
| 9459 | } |
| 9460 | case ISD::SELECT: { |
| 9461 | return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) && |
| 9462 | isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1); |
| 9463 | } |
| 9464 | case ISD::BUILD_VECTOR: { |
| 9465 | for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { |
| 9466 | SDValue SrcOp = Op.getOperand(i); |
| 9467 | if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1)) |
| 9468 | return false; |
| 9469 | } |
| 9470 | |
| 9471 | return true; |
| 9472 | } |
| 9473 | case ISD::EXTRACT_VECTOR_ELT: |
| 9474 | case ISD::EXTRACT_SUBVECTOR: { |
| 9475 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); |
| 9476 | } |
| 9477 | case ISD::INSERT_VECTOR_ELT: { |
| 9478 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) && |
| 9479 | isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1); |
| 9480 | } |
| 9481 | case ISD::UNDEF: |
| 9482 | // Could be anything. |
| 9483 | return false; |
| 9484 | |
| 9485 | case ISD::BITCAST: { |
    // Hack around the mess we make when legalizing extract_vector_elt
| 9487 | SDValue Src = Op.getOperand(0); |
| 9488 | if (Src.getValueType() == MVT::i16 && |
| 9489 | Src.getOpcode() == ISD::TRUNCATE) { |
| 9490 | SDValue TruncSrc = Src.getOperand(0); |
| 9491 | if (TruncSrc.getValueType() == MVT::i32 && |
| 9492 | TruncSrc.getOpcode() == ISD::BITCAST && |
| 9493 | TruncSrc.getOperand(0).getValueType() == MVT::v2f16) { |
| 9494 | return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1); |
| 9495 | } |
| 9496 | } |
| 9497 | |
| 9498 | return false; |
| 9499 | } |
| 9500 | case ISD::INTRINSIC_WO_CHAIN: { |
| 9501 | unsigned IntrinsicID |
| 9502 | = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 9503 | // TODO: Handle more intrinsics |
| 9504 | switch (IntrinsicID) { |
| 9505 | case Intrinsic::amdgcn_cvt_pkrtz: |
| 9506 | case Intrinsic::amdgcn_cubeid: |
| 9507 | case Intrinsic::amdgcn_frexp_mant: |
| 9508 | case Intrinsic::amdgcn_fdot2: |
| 9509 | case Intrinsic::amdgcn_rcp: |
| 9510 | case Intrinsic::amdgcn_rsq: |
| 9511 | case Intrinsic::amdgcn_rsq_clamp: |
| 9512 | case Intrinsic::amdgcn_rcp_legacy: |
| 9513 | case Intrinsic::amdgcn_rsq_legacy: |
| 9514 | case Intrinsic::amdgcn_trig_preop: |
| 9515 | return true; |
| 9516 | default: |
| 9517 | break; |
| 9518 | } |
| 9519 | |
| 9520 | LLVM_FALLTHROUGH; |
| 9521 | } |
| 9522 | default: |
| 9523 | return denormalsEnabledForType(DAG, Op.getValueType()) && |
| 9524 | DAG.isKnownNeverSNaN(Op); |
| 9525 | } |
| 9526 | |
| 9527 | llvm_unreachable("invalid operation" ); |
| 9528 | } |
| 9529 | |
| 9530 | // Constant fold canonicalize. |
| 9531 | SDValue SITargetLowering::getCanonicalConstantFP( |
| 9532 | SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const { |
| 9533 | // Flush denormals to 0 if not enabled. |
| 9534 | if (C.isDenormal() && !denormalsEnabledForType(DAG, VT)) |
| 9535 | return DAG.getConstantFP(0.0, SL, VT); |
| 9536 | |
| 9537 | if (C.isNaN()) { |
| 9538 | APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); |
| 9539 | if (C.isSignaling()) { |
| 9540 | // Quiet a signaling NaN. |
| 9541 | // FIXME: Is this supposed to preserve payload bits? |
| 9542 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); |
| 9543 | } |
| 9544 | |
| 9545 | // Make sure it is the canonical NaN bitpattern. |
| 9546 | // |
| 9547 | // TODO: Can we use -1 as the canonical NaN value since it's an inline |
| 9548 | // immediate? |
| 9549 | if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) |
| 9550 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); |
| 9551 | } |
| 9552 | |
| 9553 | // Already canonical. |
| 9554 | return DAG.getConstantFP(C, SL, VT); |
| 9555 | } |
| 9556 | |
| 9557 | static bool vectorEltWillFoldAway(SDValue Op) { |
| 9558 | return Op.isUndef() || isa<ConstantFPSDNode>(Op); |
| 9559 | } |
| 9560 | |
| 9561 | SDValue SITargetLowering::performFCanonicalizeCombine( |
| 9562 | SDNode *N, |
| 9563 | DAGCombinerInfo &DCI) const { |
| 9564 | SelectionDAG &DAG = DCI.DAG; |
| 9565 | SDValue N0 = N->getOperand(0); |
| 9566 | EVT VT = N->getValueType(0); |
| 9567 | |
| 9568 | // fcanonicalize undef -> qnan |
| 9569 | if (N0.isUndef()) { |
| 9570 | APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT)); |
| 9571 | return DAG.getConstantFP(QNaN, SDLoc(N), VT); |
| 9572 | } |
| 9573 | |
| 9574 | if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) { |
| 9575 | EVT VT = N->getValueType(0); |
| 9576 | return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF()); |
| 9577 | } |
| 9578 | |
| 9579 | // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x), |
| 9580 | // (fcanonicalize k) |
| 9581 | // |
| 9582 | // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0 |
| 9583 | |
| 9584 | // TODO: This could be better with wider vectors that will be split to v2f16, |
| 9585 | // and to consider uses since there aren't that many packed operations. |
| 9586 | if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 && |
| 9587 | isTypeLegal(MVT::v2f16)) { |
| 9588 | SDLoc SL(N); |
| 9589 | SDValue NewElts[2]; |
| 9590 | SDValue Lo = N0.getOperand(0); |
| 9591 | SDValue Hi = N0.getOperand(1); |
| 9592 | EVT EltVT = Lo.getValueType(); |
| 9593 | |
| 9594 | if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) { |
| 9595 | for (unsigned I = 0; I != 2; ++I) { |
| 9596 | SDValue Op = N0.getOperand(I); |
| 9597 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { |
| 9598 | NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT, |
| 9599 | CFP->getValueAPF()); |
| 9600 | } else if (Op.isUndef()) { |
| 9601 | // Handled below based on what the other operand is. |
| 9602 | NewElts[I] = Op; |
| 9603 | } else { |
| 9604 | NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op); |
| 9605 | } |
| 9606 | } |
| 9607 | |
      // If one half is undef, and one is constant, prefer a splat vector
      // rather than the normal qNaN. If it's a register, prefer 0.0 since
      // that's cheaper to use and may be free with a packed operation.
      if (NewElts[0].isUndef() && isa<ConstantFPSDNode>(NewElts[1]))
        NewElts[0] = NewElts[1];
| 9616 | |
| 9617 | if (NewElts[1].isUndef()) { |
| 9618 | NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ? |
| 9619 | NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT); |
| 9620 | } |
| 9621 | |
| 9622 | return DAG.getBuildVector(VT, SL, NewElts); |
| 9623 | } |
| 9624 | } |
| 9625 | |
| 9626 | unsigned SrcOpc = N0.getOpcode(); |
| 9627 | |
| 9628 | // If it's free to do so, push canonicalizes further up the source, which may |
| 9629 | // find a canonical source. |
| 9630 | // |
  // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
  // sNaNs.
| 9633 | if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) { |
| 9634 | auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); |
| 9635 | if (CRHS && N0.hasOneUse()) { |
| 9636 | SDLoc SL(N); |
| 9637 | SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT, |
| 9638 | N0.getOperand(0)); |
| 9639 | SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF()); |
| 9640 | DCI.AddToWorklist(Canon0.getNode()); |
| 9641 | |
| 9642 | return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1); |
| 9643 | } |
| 9644 | } |
| 9645 | |
| 9646 | return isCanonicalized(DAG, N0) ? N0 : SDValue(); |
| 9647 | } |
| 9648 | |
| 9649 | static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { |
| 9650 | switch (Opc) { |
| 9651 | case ISD::FMAXNUM: |
| 9652 | case ISD::FMAXNUM_IEEE: |
| 9653 | return AMDGPUISD::FMAX3; |
| 9654 | case ISD::SMAX: |
| 9655 | return AMDGPUISD::SMAX3; |
| 9656 | case ISD::UMAX: |
| 9657 | return AMDGPUISD::UMAX3; |
| 9658 | case ISD::FMINNUM: |
| 9659 | case ISD::FMINNUM_IEEE: |
| 9660 | return AMDGPUISD::FMIN3; |
| 9661 | case ISD::SMIN: |
| 9662 | return AMDGPUISD::SMIN3; |
| 9663 | case ISD::UMIN: |
| 9664 | return AMDGPUISD::UMIN3; |
| 9665 | default: |
| 9666 | llvm_unreachable("Not a min/max opcode" ); |
| 9667 | } |
| 9668 | } |
| 9669 | |
| 9670 | SDValue SITargetLowering::performIntMed3ImmCombine( |
| 9671 | SelectionDAG &DAG, const SDLoc &SL, |
| 9672 | SDValue Op0, SDValue Op1, bool Signed) const { |
| 9673 | ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); |
| 9674 | if (!K1) |
| 9675 | return SDValue(); |
| 9676 | |
| 9677 | ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); |
| 9678 | if (!K0) |
| 9679 | return SDValue(); |
| 9680 | |
| 9681 | if (Signed) { |
| 9682 | if (K0->getAPIntValue().sge(K1->getAPIntValue())) |
| 9683 | return SDValue(); |
| 9684 | } else { |
| 9685 | if (K0->getAPIntValue().uge(K1->getAPIntValue())) |
| 9686 | return SDValue(); |
| 9687 | } |
| 9688 | |
| 9689 | EVT VT = K0->getValueType(0); |
| 9690 | unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; |
| 9691 | if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { |
| 9692 | return DAG.getNode(Med3Opc, SL, VT, |
| 9693 | Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); |
| 9694 | } |
| 9695 | |
| 9696 | // If there isn't a 16-bit med3 operation, convert to 32-bit. |
| 9697 | MVT NVT = MVT::i32; |
| 9698 | unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
| 9699 | |
| 9700 | SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); |
| 9701 | SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); |
| 9702 | SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); |
| 9703 | |
| 9704 | SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); |
| 9705 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); |
| 9706 | } |
| 9707 | |
| 9708 | static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { |
| 9709 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) |
| 9710 | return C; |
| 9711 | |
| 9712 | if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { |
| 9713 | if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) |
| 9714 | return C; |
| 9715 | } |
| 9716 | |
| 9717 | return nullptr; |
| 9718 | } |
| 9719 | |
| 9720 | SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, |
| 9721 | const SDLoc &SL, |
| 9722 | SDValue Op0, |
| 9723 | SDValue Op1) const { |
| 9724 | ConstantFPSDNode *K1 = getSplatConstantFP(Op1); |
| 9725 | if (!K1) |
| 9726 | return SDValue(); |
| 9727 | |
| 9728 | ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); |
| 9729 | if (!K0) |
| 9730 | return SDValue(); |
| 9731 | |
| 9732 | // Ordered >= (although NaN inputs should have folded away by now). |
| 9733 | if (K0->getValueAPF() > K1->getValueAPF()) |
| 9734 | return SDValue(); |
| 9735 | |
| 9736 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 9737 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 9738 | |
| 9739 | // TODO: Check IEEE bit enabled? |
| 9740 | EVT VT = Op0.getValueType(); |
| 9741 | if (Info->getMode().DX10Clamp) { |
| 9742 | // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the |
| 9743 | // hardware fmed3 behavior converting to a min. |
| 9744 | // FIXME: Should this be allowing -0.0? |
| 9745 | if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) |
| 9746 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); |
| 9747 | } |
| 9748 | |
| 9749 | // med3 for f16 is only available on gfx9+, and not available for v2f16. |
| 9750 | if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) { |
| 9751 | // This isn't safe with signaling NaNs because in IEEE mode, min/max on a |
| 9752 | // signaling NaN gives a quiet NaN. The quiet NaN input to the min would |
| 9753 | // then give the other result, which is different from med3 with a NaN |
| 9754 | // input. |
| 9755 | SDValue Var = Op0.getOperand(0); |
| 9756 | if (!DAG.isKnownNeverSNaN(Var)) |
| 9757 | return SDValue(); |
| 9758 | |
| 9759 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 9760 | |
| 9761 | if ((!K0->hasOneUse() || |
| 9762 | TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) && |
| 9763 | (!K1->hasOneUse() || |
| 9764 | TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) { |
| 9765 | return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), |
| 9766 | Var, SDValue(K0, 0), SDValue(K1, 0)); |
| 9767 | } |
| 9768 | } |
| 9769 | |
| 9770 | return SDValue(); |
| 9771 | } |
| 9772 | |
| 9773 | SDValue SITargetLowering::performMinMaxCombine(SDNode *N, |
| 9774 | DAGCombinerInfo &DCI) const { |
| 9775 | SelectionDAG &DAG = DCI.DAG; |
| 9776 | |
| 9777 | EVT VT = N->getValueType(0); |
| 9778 | unsigned Opc = N->getOpcode(); |
| 9779 | SDValue Op0 = N->getOperand(0); |
| 9780 | SDValue Op1 = N->getOperand(1); |
| 9781 | |
  // Only do this if the inner op has one use, since otherwise this just
  // increases register pressure for no benefit.
| 9784 | |
| 9785 | if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && |
| 9786 | !VT.isVector() && |
| 9787 | (VT == MVT::i32 || VT == MVT::f32 || |
| 9788 | ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) { |
| 9789 | // max(max(a, b), c) -> max3(a, b, c) |
| 9790 | // min(min(a, b), c) -> min3(a, b, c) |
| 9791 | if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { |
| 9792 | SDLoc DL(N); |
| 9793 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
| 9794 | DL, |
| 9795 | N->getValueType(0), |
| 9796 | Op0.getOperand(0), |
| 9797 | Op0.getOperand(1), |
| 9798 | Op1); |
| 9799 | } |
| 9800 | |
| 9801 | // Try commuted. |
| 9802 | // max(a, max(b, c)) -> max3(a, b, c) |
| 9803 | // min(a, min(b, c)) -> min3(a, b, c) |
| 9804 | if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { |
| 9805 | SDLoc DL(N); |
| 9806 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
| 9807 | DL, |
| 9808 | N->getValueType(0), |
| 9809 | Op0, |
| 9810 | Op1.getOperand(0), |
| 9811 | Op1.getOperand(1)); |
| 9812 | } |
| 9813 | } |
| 9814 | |
| 9815 | // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) |
| 9816 | if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { |
| 9817 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) |
| 9818 | return Med3; |
| 9819 | } |
| 9820 | |
| 9821 | if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { |
| 9822 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) |
| 9823 | return Med3; |
| 9824 | } |
| 9825 | |
| 9826 | // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) |
| 9827 | if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || |
| 9828 | (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) || |
| 9829 | (Opc == AMDGPUISD::FMIN_LEGACY && |
| 9830 | Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && |
| 9831 | (VT == MVT::f32 || VT == MVT::f64 || |
| 9832 | (VT == MVT::f16 && Subtarget->has16BitInsts()) || |
| 9833 | (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) && |
| 9834 | Op0.hasOneUse()) { |
| 9835 | if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) |
| 9836 | return Res; |
| 9837 | } |
| 9838 | |
| 9839 | return SDValue(); |
| 9840 | } |
| 9841 | |
| 9842 | static bool isClampZeroToOne(SDValue A, SDValue B) { |
| 9843 | if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { |
| 9844 | if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { |
| 9845 | // FIXME: Should this be allowing -0.0? |
| 9846 | return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || |
| 9847 | (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); |
| 9848 | } |
| 9849 | } |
| 9850 | |
| 9851 | return false; |
| 9852 | } |
| 9853 | |
// FIXME: Should only worry about sNaNs for the version with a chain.
| 9855 | SDValue SITargetLowering::performFMed3Combine(SDNode *N, |
| 9856 | DAGCombinerInfo &DCI) const { |
| 9857 | EVT VT = N->getValueType(0); |
| 9858 | // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and |
| 9859 | // NaNs. With a NaN input, the order of the operands may change the result. |
| 9860 | |
| 9861 | SelectionDAG &DAG = DCI.DAG; |
| 9862 | SDLoc SL(N); |
| 9863 | |
| 9864 | SDValue Src0 = N->getOperand(0); |
| 9865 | SDValue Src1 = N->getOperand(1); |
| 9866 | SDValue Src2 = N->getOperand(2); |
| 9867 | |
| 9868 | if (isClampZeroToOne(Src0, Src1)) { |
| 9869 | // const_a, const_b, x -> clamp is safe in all cases including signaling |
| 9870 | // nans. |
| 9871 | // FIXME: Should this be allowing -0.0? |
| 9872 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); |
| 9873 | } |
| 9874 | |
| 9875 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 9876 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 9877 | |
| 9878 | // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother |
| 9879 | // handling no dx10-clamp? |
| 9880 | if (Info->getMode().DX10Clamp) { |
    // If NaNs are clamped to 0, we are free to reorder the inputs.
| 9882 | |
| 9883 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) |
| 9884 | std::swap(Src0, Src1); |
| 9885 | |
| 9886 | if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) |
| 9887 | std::swap(Src1, Src2); |
| 9888 | |
| 9889 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) |
| 9890 | std::swap(Src0, Src1); |
| 9891 | |
| 9892 | if (isClampZeroToOne(Src1, Src2)) |
| 9893 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); |
| 9894 | } |
| 9895 | |
| 9896 | return SDValue(); |
| 9897 | } |
| 9898 | |
| 9899 | SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, |
| 9900 | DAGCombinerInfo &DCI) const { |
| 9901 | SDValue Src0 = N->getOperand(0); |
| 9902 | SDValue Src1 = N->getOperand(1); |
| 9903 | if (Src0.isUndef() && Src1.isUndef()) |
| 9904 | return DCI.DAG.getUNDEF(N->getValueType(0)); |
| 9905 | return SDValue(); |
| 9906 | } |
| 9907 | |
| 9908 | // Check if EXTRACT_VECTOR_ELT/INSERT_VECTOR_ELT (<n x e>, var-idx) should be |
| 9909 | // expanded into a set of cmp/select instructions. |
| 9910 | bool SITargetLowering::shouldExpandVectorDynExt(unsigned EltSize, |
| 9911 | unsigned NumElem, |
| 9912 | bool IsDivergentIdx) { |
| 9913 | if (UseDivergentRegisterIndexing) |
| 9914 | return false; |
| 9915 | |
| 9916 | unsigned VecSize = EltSize * NumElem; |
| 9917 | |
  // Sub-dword vectors with a total size of two dwords or less have a better
  // implementation.
| 9919 | if (VecSize <= 64 && EltSize < 32) |
| 9920 | return false; |
| 9921 | |
  // Always expand the remaining sub-dword cases, otherwise they will be
  // lowered via memory.
| 9924 | if (EltSize < 32) |
| 9925 | return true; |
| 9926 | |
| 9927 | // Always do this if var-idx is divergent, otherwise it will become a loop. |
| 9928 | if (IsDivergentIdx) |
| 9929 | return true; |
| 9930 | |
| 9931 | // Large vectors would yield too many compares and v_cndmask_b32 instructions. |
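  // For example, a v8i32 extract needs 8 compares + 8 cndmasks = 16
  // instructions and is expanded, while v16i32 needs 16 + 16 = 32 and is not;
  // 64-bit elements double the cndmask count per element.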
| 9932 | unsigned NumInsts = NumElem /* Number of compares */ + |
| 9933 | ((EltSize + 31) / 32) * NumElem /* Number of cndmasks */; |
| 9934 | return NumInsts <= 16; |
| 9935 | } |
| 9936 | |
| 9937 | static bool shouldExpandVectorDynExt(SDNode *N) { |
| 9938 | SDValue Idx = N->getOperand(N->getNumOperands() - 1); |
| 9939 | if (isa<ConstantSDNode>(Idx)) |
| 9940 | return false; |
| 9941 | |
| 9942 | SDValue Vec = N->getOperand(0); |
| 9943 | EVT VecVT = Vec.getValueType(); |
| 9944 | EVT EltVT = VecVT.getVectorElementType(); |
| 9945 | unsigned EltSize = EltVT.getSizeInBits(); |
| 9946 | unsigned NumElem = VecVT.getVectorNumElements(); |
| 9947 | |
| 9948 | return SITargetLowering::shouldExpandVectorDynExt(EltSize, NumElem, |
| 9949 | Idx->isDivergent()); |
| 9950 | } |
| 9951 | |
SDValue SITargetLowering::performExtractVectorEltCombine(
| 9953 | SDNode *N, DAGCombinerInfo &DCI) const { |
| 9954 | SDValue Vec = N->getOperand(0); |
| 9955 | SelectionDAG &DAG = DCI.DAG; |
| 9956 | |
| 9957 | EVT VecVT = Vec.getValueType(); |
| 9958 | EVT EltVT = VecVT.getVectorElementType(); |
| 9959 | |
| 9960 | if ((Vec.getOpcode() == ISD::FNEG || |
| 9961 | Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) { |
| 9962 | SDLoc SL(N); |
| 9963 | EVT EltVT = N->getValueType(0); |
| 9964 | SDValue Idx = N->getOperand(1); |
| 9965 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
| 9966 | Vec.getOperand(0), Idx); |
| 9967 | return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt); |
| 9968 | } |
| 9969 | |
| 9970 | // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx) |
| 9971 | // => |
| 9972 | // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx) |
| 9973 | // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx) |
| 9974 | // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt |
| 9975 | if (Vec.hasOneUse() && DCI.isBeforeLegalize()) { |
| 9976 | SDLoc SL(N); |
| 9977 | EVT EltVT = N->getValueType(0); |
| 9978 | SDValue Idx = N->getOperand(1); |
| 9979 | unsigned Opc = Vec.getOpcode(); |
| 9980 | |
| 9981 | switch(Opc) { |
| 9982 | default: |
| 9983 | break; |
| 9984 | // TODO: Support other binary operations. |
| 9985 | case ISD::FADD: |
| 9986 | case ISD::FSUB: |
| 9987 | case ISD::FMUL: |
| 9988 | case ISD::ADD: |
| 9989 | case ISD::UMIN: |
| 9990 | case ISD::UMAX: |
| 9991 | case ISD::SMIN: |
| 9992 | case ISD::SMAX: |
| 9993 | case ISD::FMAXNUM: |
| 9994 | case ISD::FMINNUM: |
| 9995 | case ISD::FMAXNUM_IEEE: |
| 9996 | case ISD::FMINNUM_IEEE: { |
| 9997 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
| 9998 | Vec.getOperand(0), Idx); |
| 9999 | SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
| 10000 | Vec.getOperand(1), Idx); |
| 10001 | |
| 10002 | DCI.AddToWorklist(Elt0.getNode()); |
| 10003 | DCI.AddToWorklist(Elt1.getNode()); |
| 10004 | return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags()); |
| 10005 | } |
| 10006 | } |
| 10007 | } |
| 10008 | |
| 10009 | unsigned VecSize = VecVT.getSizeInBits(); |
| 10010 | unsigned EltSize = EltVT.getSizeInBits(); |
| 10011 | |
| 10012 | // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx) |
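  // For example, extracting a variable element from <4 x i32> Vec becomes:
  //   V = Vec[0]
  //   V = select (Idx == 1), Vec[1], V
  //   V = select (Idx == 2), Vec[2], V
  //   V = select (Idx == 3), Vec[3], V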
| 10013 | if (::shouldExpandVectorDynExt(N)) { |
| 10014 | SDLoc SL(N); |
| 10015 | SDValue Idx = N->getOperand(1); |
| 10016 | SDValue V; |
| 10017 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { |
| 10018 | SDValue IC = DAG.getVectorIdxConstant(I, SL); |
| 10019 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); |
| 10020 | if (I == 0) |
| 10021 | V = Elt; |
| 10022 | else |
| 10023 | V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ); |
| 10024 | } |
| 10025 | return V; |
| 10026 | } |
| 10027 | |
| 10028 | if (!DCI.isBeforeLegalize()) |
| 10029 | return SDValue(); |
| 10030 | |
| 10031 | // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit |
| 10032 | // elements. This exposes more load reduction opportunities by replacing |
| 10033 | // multiple small extract_vector_elements with a single 32-bit extract. |
| 10034 | auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 10035 | if (isa<MemSDNode>(Vec) && |
| 10036 | EltSize <= 16 && |
| 10037 | EltVT.isByteSized() && |
| 10038 | VecSize > 32 && |
| 10039 | VecSize % 32 == 0 && |
| 10040 | Idx) { |
| 10041 | EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT); |
| 10042 | |
| 10043 | unsigned BitIndex = Idx->getZExtValue() * EltSize; |
| 10044 | unsigned EltIdx = BitIndex / 32; |
| 10045 | unsigned LeftoverBitIdx = BitIndex % 32; |
| 10046 | SDLoc SL(N); |
| 10047 | |
| 10048 | SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec); |
| 10049 | DCI.AddToWorklist(Cast.getNode()); |
| 10050 | |
| 10051 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast, |
| 10052 | DAG.getConstant(EltIdx, SL, MVT::i32)); |
| 10053 | DCI.AddToWorklist(Elt.getNode()); |
| 10054 | SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt, |
| 10055 | DAG.getConstant(LeftoverBitIdx, SL, MVT::i32)); |
| 10056 | DCI.AddToWorklist(Srl.getNode()); |
| 10057 | |
| 10058 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl); |
| 10059 | DCI.AddToWorklist(Trunc.getNode()); |
| 10060 | return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc); |
| 10061 | } |
| 10062 | |
| 10063 | return SDValue(); |
| 10064 | } |
| 10065 | |
| 10066 | SDValue |
| 10067 | SITargetLowering::performInsertVectorEltCombine(SDNode *N, |
| 10068 | DAGCombinerInfo &DCI) const { |
| 10069 | SDValue Vec = N->getOperand(0); |
| 10070 | SDValue Idx = N->getOperand(2); |
| 10071 | EVT VecVT = Vec.getValueType(); |
| 10072 | EVT EltVT = VecVT.getVectorElementType(); |
| 10073 | |
| 10074 | // INSERT_VECTOR_ELT (<n x e>, var-idx) |
| 10075 | // => BUILD_VECTOR n x select (e, const-idx) |
| 10076 | if (!::shouldExpandVectorDynExt(N)) |
| 10077 | return SDValue(); |
| 10078 | |
| 10079 | SelectionDAG &DAG = DCI.DAG; |
| 10080 | SDLoc SL(N); |
| 10081 | SDValue Ins = N->getOperand(1); |
| 10082 | EVT IdxVT = Idx.getValueType(); |
| 10083 | |
| 10084 | SmallVector<SDValue, 16> Ops; |
| 10085 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { |
| 10086 | SDValue IC = DAG.getConstant(I, SL, IdxVT); |
| 10087 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); |
| 10088 | SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ); |
| 10089 | Ops.push_back(V); |
| 10090 | } |
| 10091 | |
| 10092 | return DAG.getBuildVector(VecVT, SL, Ops); |
| 10093 | } |
| 10094 | |
| 10095 | unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, |
| 10096 | const SDNode *N0, |
| 10097 | const SDNode *N1) const { |
| 10098 | EVT VT = N0->getValueType(0); |
| 10099 | |
| 10100 | // Only do this if we are not trying to support denormals. v_mad_f32 does not |
| 10101 | // support denormals ever. |
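  // Prefer FMAD when it is legal and denormals are off; otherwise fall back
  // to FMA below when contraction is allowed, or return 0 to disable fusion.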
| 10102 | if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) || |
| 10103 | (VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) && |
| 10104 | getSubtarget()->hasMadF16())) && |
| 10105 | isOperationLegal(ISD::FMAD, VT)) |
| 10106 | return ISD::FMAD; |
| 10107 | |
| 10108 | const TargetOptions &Options = DAG.getTarget().Options; |
| 10109 | if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || |
| 10110 | (N0->getFlags().hasAllowContract() && |
| 10111 | N1->getFlags().hasAllowContract())) && |
| 10112 | isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) { |
| 10113 | return ISD::FMA; |
| 10114 | } |
| 10115 | |
| 10116 | return 0; |
| 10117 | } |
| 10118 | |
// For a reassociatable opcode, perform:
// op x, (op y, z) -> op (op x, z), y, if x and z are uniform
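// For example, add s0, (add v0, s1) -> add (add s0, s1), v0: the inner add
// then has only uniform operands and can be selected to a scalar instruction.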
| 10121 | SDValue SITargetLowering::reassociateScalarOps(SDNode *N, |
| 10122 | SelectionDAG &DAG) const { |
| 10123 | EVT VT = N->getValueType(0); |
| 10124 | if (VT != MVT::i32 && VT != MVT::i64) |
| 10125 | return SDValue(); |
| 10126 | |
| 10127 | unsigned Opc = N->getOpcode(); |
| 10128 | SDValue Op0 = N->getOperand(0); |
| 10129 | SDValue Op1 = N->getOperand(1); |
| 10130 | |
| 10131 | if (!(Op0->isDivergent() ^ Op1->isDivergent())) |
| 10132 | return SDValue(); |
| 10133 | |
| 10134 | if (Op0->isDivergent()) |
| 10135 | std::swap(Op0, Op1); |
| 10136 | |
| 10137 | if (Op1.getOpcode() != Opc || !Op1.hasOneUse()) |
| 10138 | return SDValue(); |
| 10139 | |
| 10140 | SDValue Op2 = Op1.getOperand(1); |
| 10141 | Op1 = Op1.getOperand(0); |
| 10142 | if (!(Op1->isDivergent() ^ Op2->isDivergent())) |
| 10143 | return SDValue(); |
| 10144 | |
| 10145 | if (Op1->isDivergent()) |
| 10146 | std::swap(Op1, Op2); |
| 10147 | |
| 10148 | // If either operand is constant this will conflict with |
| 10149 | // DAGCombiner::ReassociateOps(). |
| 10150 | if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) || |
| 10151 | DAG.isConstantIntBuildVectorOrConstantInt(Op1)) |
| 10152 | return SDValue(); |
| 10153 | |
| 10154 | SDLoc SL(N); |
| 10155 | SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1); |
| 10156 | return DAG.getNode(Opc, SL, VT, Add1, Op2); |
| 10157 | } |
| 10158 | |
| 10159 | static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, |
| 10160 | EVT VT, |
| 10161 | SDValue N0, SDValue N1, SDValue N2, |
| 10162 | bool Signed) { |
| 10163 | unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32; |
| 10164 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1); |
| 10165 | SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2); |
| 10166 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); |
| 10167 | } |
| 10168 | |
| 10169 | SDValue SITargetLowering::performAddCombine(SDNode *N, |
| 10170 | DAGCombinerInfo &DCI) const { |
| 10171 | SelectionDAG &DAG = DCI.DAG; |
| 10172 | EVT VT = N->getValueType(0); |
| 10173 | SDLoc SL(N); |
| 10174 | SDValue LHS = N->getOperand(0); |
| 10175 | SDValue RHS = N->getOperand(1); |
| 10176 | |
| 10177 | if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) |
| 10178 | && Subtarget->hasMad64_32() && |
| 10179 | !VT.isVector() && VT.getScalarSizeInBits() > 32 && |
| 10180 | VT.getScalarSizeInBits() <= 64) { |
| 10181 | if (LHS.getOpcode() != ISD::MUL) |
| 10182 | std::swap(LHS, RHS); |
| 10183 | |
| 10184 | SDValue MulLHS = LHS.getOperand(0); |
| 10185 | SDValue MulRHS = LHS.getOperand(1); |
| 10186 | SDValue AddRHS = RHS; |
| 10187 | |
| 10188 | // TODO: Maybe restrict if SGPR inputs. |
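    // For example, (add (mul (zext i32:a), (zext i32:b)), i64:c) has both mul
    // operands provably within 32 bits, so it folds to a single MAD_U64_U32
    // node below.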
| 10189 | if (numBitsUnsigned(MulLHS, DAG) <= 32 && |
| 10190 | numBitsUnsigned(MulRHS, DAG) <= 32) { |
| 10191 | MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32); |
| 10192 | MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32); |
| 10193 | AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64); |
| 10194 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false); |
| 10195 | } |
| 10196 | |
| 10197 | if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) { |
| 10198 | MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32); |
| 10199 | MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32); |
| 10200 | AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64); |
| 10201 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true); |
| 10202 | } |
| 10203 | |
| 10204 | return SDValue(); |
| 10205 | } |
| 10206 | |
| 10207 | if (SDValue V = reassociateScalarOps(N, DAG)) { |
| 10208 | return V; |
| 10209 | } |
| 10210 | |
| 10211 | if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG()) |
| 10212 | return SDValue(); |
| 10213 | |
| 10214 | // add x, zext (setcc) => addcarry x, 0, setcc |
| 10215 | // add x, sext (setcc) => subcarry x, 0, setcc |
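  // This works because sext from i1 yields 0 or -1, so adding the sext is the
  // same as subtracting the corresponding zext (0 or 1); the setcc result
  // becomes the carry/borrow input.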
| 10216 | unsigned Opc = LHS.getOpcode(); |
| 10217 | if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || |
| 10218 | Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) |
| 10219 | std::swap(RHS, LHS); |
| 10220 | |
| 10221 | Opc = RHS.getOpcode(); |
| 10222 | switch (Opc) { |
| 10223 | default: break; |
| 10224 | case ISD::ZERO_EXTEND: |
| 10225 | case ISD::SIGN_EXTEND: |
| 10226 | case ISD::ANY_EXTEND: { |
| 10227 | auto Cond = RHS.getOperand(0); |
| 10228 | // If this won't be a real VOPC output, we would still need to insert an |
| 10229 | // extra instruction anyway. |
| 10230 | if (!isBoolSGPR(Cond)) |
| 10231 | break; |
| 10232 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); |
| 10233 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; |
| 10234 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY; |
| 10235 | return DAG.getNode(Opc, SL, VTList, Args); |
| 10236 | } |
| 10237 | case ISD::ADDCARRY: { |
| 10238 | // add x, (addcarry y, 0, cc) => addcarry x, y, cc |
| 10239 | auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
| 10240 | if (!C || C->getZExtValue() != 0) break; |
| 10241 | SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) }; |
| 10242 | return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args); |
| 10243 | } |
| 10244 | } |
| 10245 | return SDValue(); |
| 10246 | } |
| 10247 | |
| 10248 | SDValue SITargetLowering::performSubCombine(SDNode *N, |
| 10249 | DAGCombinerInfo &DCI) const { |
| 10250 | SelectionDAG &DAG = DCI.DAG; |
| 10251 | EVT VT = N->getValueType(0); |
| 10252 | |
| 10253 | if (VT != MVT::i32) |
| 10254 | return SDValue(); |
| 10255 | |
| 10256 | SDLoc SL(N); |
| 10257 | SDValue LHS = N->getOperand(0); |
| 10258 | SDValue RHS = N->getOperand(1); |
| 10259 | |
| 10260 | // sub x, zext (setcc) => subcarry x, 0, setcc |
| 10261 | // sub x, sext (setcc) => addcarry x, 0, setcc |
| 10262 | unsigned Opc = RHS.getOpcode(); |
| 10263 | switch (Opc) { |
| 10264 | default: break; |
| 10265 | case ISD::ZERO_EXTEND: |
| 10266 | case ISD::SIGN_EXTEND: |
| 10267 | case ISD::ANY_EXTEND: { |
| 10268 | auto Cond = RHS.getOperand(0); |
| 10269 | // If this won't be a real VOPC output, we would still need to insert an |
| 10270 | // extra instruction anyway. |
| 10271 | if (!isBoolSGPR(Cond)) |
| 10272 | break; |
| 10273 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); |
| 10274 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; |
| 10275 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY; |
| 10276 | return DAG.getNode(Opc, SL, VTList, Args); |
| 10277 | } |
| 10278 | } |
| 10279 | |
| 10280 | if (LHS.getOpcode() == ISD::SUBCARRY) { |
| 10281 | // sub (subcarry x, 0, cc), y => subcarry x, y, cc |
| 10282 | auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); |
| 10283 | if (!C || !C->isNullValue()) |
| 10284 | return SDValue(); |
| 10285 | SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) }; |
| 10286 | return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args); |
| 10287 | } |
| 10288 | return SDValue(); |
| 10289 | } |
| 10290 | |
| 10291 | SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N, |
| 10292 | DAGCombinerInfo &DCI) const { |
| 10293 | |
| 10294 | if (N->getValueType(0) != MVT::i32) |
| 10295 | return SDValue(); |
| 10296 | |
| 10297 | auto C = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 10298 | if (!C || C->getZExtValue() != 0) |
| 10299 | return SDValue(); |
| 10300 | |
| 10301 | SelectionDAG &DAG = DCI.DAG; |
| 10302 | SDValue LHS = N->getOperand(0); |
| 10303 | |
| 10304 | // addcarry (add x, y), 0, cc => addcarry x, y, cc |
| 10305 | // subcarry (sub x, y), 0, cc => subcarry x, y, cc |
| 10306 | unsigned LHSOpc = LHS.getOpcode(); |
| 10307 | unsigned Opc = N->getOpcode(); |
| 10308 | if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) || |
| 10309 | (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) { |
| 10310 | SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) }; |
| 10311 | return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args); |
| 10312 | } |
| 10313 | return SDValue(); |
| 10314 | } |
| 10315 | |
| 10316 | SDValue SITargetLowering::performFAddCombine(SDNode *N, |
| 10317 | DAGCombinerInfo &DCI) const { |
| 10318 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
| 10319 | return SDValue(); |
| 10320 | |
| 10321 | SelectionDAG &DAG = DCI.DAG; |
| 10322 | EVT VT = N->getValueType(0); |
| 10323 | |
| 10324 | SDLoc SL(N); |
| 10325 | SDValue LHS = N->getOperand(0); |
| 10326 | SDValue RHS = N->getOperand(1); |
| 10327 | |
  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.
| 10330 | |
| 10331 | // fadd (fadd (a, a), b) -> mad 2.0, a, b |
| 10332 | if (LHS.getOpcode() == ISD::FADD) { |
| 10333 | SDValue A = LHS.getOperand(0); |
| 10334 | if (A == LHS.getOperand(1)) { |
| 10335 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); |
| 10336 | if (FusedOp != 0) { |
| 10337 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
| 10338 | return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); |
| 10339 | } |
| 10340 | } |
| 10341 | } |
| 10342 | |
| 10343 | // fadd (b, fadd (a, a)) -> mad 2.0, a, b |
| 10344 | if (RHS.getOpcode() == ISD::FADD) { |
| 10345 | SDValue A = RHS.getOperand(0); |
| 10346 | if (A == RHS.getOperand(1)) { |
| 10347 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); |
| 10348 | if (FusedOp != 0) { |
| 10349 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
| 10350 | return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); |
| 10351 | } |
| 10352 | } |
| 10353 | } |
| 10354 | |
| 10355 | return SDValue(); |
| 10356 | } |
| 10357 | |
| 10358 | SDValue SITargetLowering::performFSubCombine(SDNode *N, |
| 10359 | DAGCombinerInfo &DCI) const { |
| 10360 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
| 10361 | return SDValue(); |
| 10362 | |
| 10363 | SelectionDAG &DAG = DCI.DAG; |
| 10364 | SDLoc SL(N); |
| 10365 | EVT VT = N->getValueType(0); |
| 10366 | assert(!VT.isVector()); |
| 10367 | |
| 10368 | // Try to get the fneg to fold into the source modifier. This undoes generic |
| 10369 | // DAG combines and folds them into the mad. |
| 10370 | // |
| 10371 | // Only do this if we are not trying to support denormals. v_mad_f32 does |
| 10372 | // not support denormals ever. |
| 10373 | SDValue LHS = N->getOperand(0); |
| 10374 | SDValue RHS = N->getOperand(1); |
| 10375 | if (LHS.getOpcode() == ISD::FADD) { |
| 10376 | // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) |
| 10377 | SDValue A = LHS.getOperand(0); |
| 10378 | if (A == LHS.getOperand(1)) { |
| 10379 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); |
| 10380 | if (FusedOp != 0){ |
| 10381 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
| 10382 | SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); |
| 10383 | |
| 10384 | return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); |
| 10385 | } |
| 10386 | } |
| 10387 | } |
| 10388 | |
| 10389 | if (RHS.getOpcode() == ISD::FADD) { |
| 10390 | // (fsub c, (fadd a, a)) -> mad -2.0, a, c |
| 10391 | |
| 10392 | SDValue A = RHS.getOperand(0); |
| 10393 | if (A == RHS.getOperand(1)) { |
| 10394 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); |
| 10395 | if (FusedOp != 0){ |
| 10396 | const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); |
| 10397 | return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); |
| 10398 | } |
| 10399 | } |
| 10400 | } |
| 10401 | |
| 10402 | return SDValue(); |
| 10403 | } |
| 10404 | |
| 10405 | SDValue SITargetLowering::performFMACombine(SDNode *N, |
| 10406 | DAGCombinerInfo &DCI) const { |
| 10407 | SelectionDAG &DAG = DCI.DAG; |
| 10408 | EVT VT = N->getValueType(0); |
| 10409 | SDLoc SL(N); |
| 10410 | |
| 10411 | if (!Subtarget->hasDot2Insts() || VT != MVT::f32) |
| 10412 | return SDValue(); |
| 10413 | |
  // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
  // FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
| 10416 | SDValue Op1 = N->getOperand(0); |
| 10417 | SDValue Op2 = N->getOperand(1); |
| 10418 | SDValue FMA = N->getOperand(2); |
| 10419 | |
| 10420 | if (FMA.getOpcode() != ISD::FMA || |
| 10421 | Op1.getOpcode() != ISD::FP_EXTEND || |
| 10422 | Op2.getOpcode() != ISD::FP_EXTEND) |
| 10423 | return SDValue(); |
| 10424 | |
  // fdot2_f32_f16 always flushes fp32 denormal operands and the output to
  // zero, regardless of the denorm mode setting. Therefore,
  // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2.
| 10428 | const TargetOptions &Options = DAG.getTarget().Options; |
| 10429 | if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || |
| 10430 | (N->getFlags().hasAllowContract() && |
| 10431 | FMA->getFlags().hasAllowContract())) { |
| 10432 | Op1 = Op1.getOperand(0); |
| 10433 | Op2 = Op2.getOperand(0); |
| 10434 | if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 10435 | Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 10436 | return SDValue(); |
| 10437 | |
| 10438 | SDValue Vec1 = Op1.getOperand(0); |
| 10439 | SDValue Idx1 = Op1.getOperand(1); |
| 10440 | SDValue Vec2 = Op2.getOperand(0); |
| 10441 | |
| 10442 | SDValue FMAOp1 = FMA.getOperand(0); |
| 10443 | SDValue FMAOp2 = FMA.getOperand(1); |
| 10444 | SDValue FMAAcc = FMA.getOperand(2); |
| 10445 | |
| 10446 | if (FMAOp1.getOpcode() != ISD::FP_EXTEND || |
| 10447 | FMAOp2.getOpcode() != ISD::FP_EXTEND) |
| 10448 | return SDValue(); |
| 10449 | |
| 10450 | FMAOp1 = FMAOp1.getOperand(0); |
| 10451 | FMAOp2 = FMAOp2.getOperand(0); |
| 10452 | if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 10453 | FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 10454 | return SDValue(); |
| 10455 | |
| 10456 | SDValue Vec3 = FMAOp1.getOperand(0); |
| 10457 | SDValue Vec4 = FMAOp2.getOperand(0); |
| 10458 | SDValue Idx2 = FMAOp1.getOperand(1); |
| 10459 | |
| 10460 | if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) || |
| 10461 | // Idx1 and Idx2 cannot be the same. |
| 10462 | Idx1 == Idx2) |
| 10463 | return SDValue(); |
| 10464 | |
| 10465 | if (Vec1 == Vec2 || Vec3 == Vec4) |
| 10466 | return SDValue(); |
| 10467 | |
| 10468 | if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16) |
| 10469 | return SDValue(); |
| 10470 | |
| 10471 | if ((Vec1 == Vec3 && Vec2 == Vec4) || |
| 10472 | (Vec1 == Vec4 && Vec2 == Vec3)) { |
| 10473 | return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc, |
| 10474 | DAG.getTargetConstant(0, SL, MVT::i1)); |
| 10475 | } |
| 10476 | } |
| 10477 | return SDValue(); |
| 10478 | } |
| 10479 | |
| 10480 | SDValue SITargetLowering::performSetCCCombine(SDNode *N, |
| 10481 | DAGCombinerInfo &DCI) const { |
| 10482 | SelectionDAG &DAG = DCI.DAG; |
| 10483 | SDLoc SL(N); |
| 10484 | |
| 10485 | SDValue LHS = N->getOperand(0); |
| 10486 | SDValue RHS = N->getOperand(1); |
| 10487 | EVT VT = LHS.getValueType(); |
| 10488 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); |
| 10489 | |
| 10490 | auto CRHS = dyn_cast<ConstantSDNode>(RHS); |
| 10491 | if (!CRHS) { |
| 10492 | CRHS = dyn_cast<ConstantSDNode>(LHS); |
| 10493 | if (CRHS) { |
| 10494 | std::swap(LHS, RHS); |
| 10495 | CC = getSetCCSwappedOperands(CC); |
| 10496 | } |
| 10497 | } |
| 10498 | |
| 10499 | if (CRHS) { |
| 10500 | if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && |
| 10501 | isBoolSGPR(LHS.getOperand(0))) { |
| 10502 | // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1 |
| 10503 | // setcc (sext from i1 cc), -1, eq|sle|uge) => cc |
| 10504 | // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1 |
| 10505 | // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc |
| 10506 | if ((CRHS->isAllOnesValue() && |
| 10507 | (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || |
| 10508 | (CRHS->isNullValue() && |
| 10509 | (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) |
| 10510 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), |
| 10511 | DAG.getConstant(-1, SL, MVT::i1)); |
| 10512 | if ((CRHS->isAllOnesValue() && |
| 10513 | (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || |
| 10514 | (CRHS->isNullValue() && |
| 10515 | (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) |
| 10516 | return LHS.getOperand(0); |
| 10517 | } |
| 10518 | |
| 10519 | uint64_t CRHSVal = CRHS->getZExtValue(); |
| 10520 | if ((CC == ISD::SETEQ || CC == ISD::SETNE) && |
| 10521 | LHS.getOpcode() == ISD::SELECT && |
| 10522 | isa<ConstantSDNode>(LHS.getOperand(1)) && |
| 10523 | isa<ConstantSDNode>(LHS.getOperand(2)) && |
| 10524 | LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) && |
| 10525 | isBoolSGPR(LHS.getOperand(0))) { |
| 10526 | // Given CT != FT: |
| 10527 | // setcc (select cc, CT, CF), CF, eq => xor cc, -1 |
| 10528 | // setcc (select cc, CT, CF), CF, ne => cc |
| 10529 | // setcc (select cc, CT, CF), CT, ne => xor cc, -1 |
| 10530 | // setcc (select cc, CT, CF), CT, eq => cc |
| 10531 | uint64_t CT = LHS.getConstantOperandVal(1); |
| 10532 | uint64_t CF = LHS.getConstantOperandVal(2); |
| 10533 | |
| 10534 | if ((CF == CRHSVal && CC == ISD::SETEQ) || |
| 10535 | (CT == CRHSVal && CC == ISD::SETNE)) |
| 10536 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), |
| 10537 | DAG.getConstant(-1, SL, MVT::i1)); |
| 10538 | if ((CF == CRHSVal && CC == ISD::SETNE) || |
| 10539 | (CT == CRHSVal && CC == ISD::SETEQ)) |
| 10540 | return LHS.getOperand(0); |
| 10541 | } |
| 10542 | } |
| 10543 | |
| 10544 | if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && |
| 10545 | VT != MVT::f16)) |
| 10546 | return SDValue(); |
| 10547 | |
| 10548 | // Match isinf/isfinite pattern |
| 10549 | // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) |
| 10550 | // (fcmp one (fabs x), inf) -> (fp_class x, |
  //     (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero))
| 10552 | if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) { |
| 10553 | const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); |
| 10554 | if (!CRHS) |
| 10555 | return SDValue(); |
| 10556 | |
| 10557 | const APFloat &APF = CRHS->getValueAPF(); |
| 10558 | if (APF.isInfinity() && !APF.isNegative()) { |
| 10559 | const unsigned IsInfMask = SIInstrFlags::P_INFINITY | |
| 10560 | SIInstrFlags::N_INFINITY; |
| 10561 | const unsigned IsFiniteMask = SIInstrFlags::N_ZERO | |
| 10562 | SIInstrFlags::P_ZERO | |
| 10563 | SIInstrFlags::N_NORMAL | |
| 10564 | SIInstrFlags::P_NORMAL | |
| 10565 | SIInstrFlags::N_SUBNORMAL | |
| 10566 | SIInstrFlags::P_SUBNORMAL; |
| 10567 | unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask; |
| 10568 | return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), |
| 10569 | DAG.getConstant(Mask, SL, MVT::i32)); |
| 10570 | } |
| 10571 | } |
| 10572 | |
| 10573 | return SDValue(); |
| 10574 | } |
| 10575 | |
| 10576 | SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, |
| 10577 | DAGCombinerInfo &DCI) const { |
| 10578 | SelectionDAG &DAG = DCI.DAG; |
| 10579 | SDLoc SL(N); |
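  // The opcode selects which byte (0-3) of the 32-bit source this node
  // converts to f32.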
| 10580 | unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; |
| 10581 | |
| 10582 | SDValue Src = N->getOperand(0); |
| 10583 | SDValue Shift = N->getOperand(0); |
| 10584 | |
| 10585 | // TODO: Extend type shouldn't matter (assuming legal types). |
| 10586 | if (Shift.getOpcode() == ISD::ZERO_EXTEND) |
| 10587 | Shift = Shift.getOperand(0); |
| 10588 | |
| 10589 | if (Shift.getOpcode() == ISD::SRL || Shift.getOpcode() == ISD::SHL) { |
| 10590 | // cvt_f32_ubyte1 (shl x, 8) -> cvt_f32_ubyte0 x |
| 10591 | // cvt_f32_ubyte3 (shl x, 16) -> cvt_f32_ubyte1 x |
| 10592 | // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x |
| 10593 | // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x |
| 10594 | // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x |
| 10595 | if (auto *C = dyn_cast<ConstantSDNode>(Shift.getOperand(1))) { |
| 10596 | Shift = DAG.getZExtOrTrunc(Shift.getOperand(0), |
| 10597 | SDLoc(Shift.getOperand(0)), MVT::i32); |
| 10598 | |
| 10599 | unsigned ShiftOffset = 8 * Offset; |
| 10600 | if (Shift.getOpcode() == ISD::SHL) |
| 10601 | ShiftOffset -= C->getZExtValue(); |
| 10602 | else |
| 10603 | ShiftOffset += C->getZExtValue(); |
| 10604 | |
| 10605 | if (ShiftOffset < 32 && (ShiftOffset % 8) == 0) { |
| 10606 | return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + ShiftOffset / 8, SL, |
| 10607 | MVT::f32, Shift); |
| 10608 | } |
| 10609 | } |
| 10610 | } |
| 10611 | |
| 10612 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
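  // Only the byte this node converts is demanded from the source.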
| 10613 | APInt DemandedBits = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); |
| 10614 | if (TLI.SimplifyDemandedBits(Src, DemandedBits, DCI)) { |
| 10615 | // We simplified Src. If this node is not dead, visit it again so it is |
| 10616 | // folded properly. |
| 10617 | if (N->getOpcode() != ISD::DELETED_NODE) |
| 10618 | DCI.AddToWorklist(N); |
| 10619 | return SDValue(N, 0); |
| 10620 | } |
| 10621 | |
| 10622 | // Handle (or x, (srl y, 8)) pattern when known bits are zero. |
| 10623 | if (SDValue DemandedSrc = |
| 10624 | TLI.SimplifyMultipleUseDemandedBits(Src, DemandedBits, DAG)) |
| 10625 | return DAG.getNode(N->getOpcode(), SL, MVT::f32, DemandedSrc); |
| 10626 | |
| 10627 | return SDValue(); |
| 10628 | } |
| 10629 | |
| 10630 | SDValue SITargetLowering::performClampCombine(SDNode *N, |
| 10631 | DAGCombinerInfo &DCI) const { |
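  // Constant-fold a clamp: values below 0.0 (and NaN when DX10 clamp is
  // enabled) fold to 0.0, values above 1.0 fold to 1.0, and everything else
  // folds to the constant itself.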
| 10632 | ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); |
| 10633 | if (!CSrc) |
| 10634 | return SDValue(); |
| 10635 | |
| 10636 | const MachineFunction &MF = DCI.DAG.getMachineFunction(); |
| 10637 | const APFloat &F = CSrc->getValueAPF(); |
| 10638 | APFloat Zero = APFloat::getZero(F.getSemantics()); |
| 10639 | if (F < Zero || |
| 10640 | (F.isNaN() && MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) { |
| 10641 | return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0)); |
| 10642 | } |
| 10643 | |
  APFloat One(F.getSemantics(), "1.0");
| 10645 | if (F > One) |
| 10646 | return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0)); |
| 10647 | |
| 10648 | return SDValue(CSrc, 0); |
| 10649 | } |
| 10650 | |
| 10651 | |
| 10652 | SDValue SITargetLowering::PerformDAGCombine(SDNode *N, |
| 10653 | DAGCombinerInfo &DCI) const { |
| 10654 | if (getTargetMachine().getOptLevel() == CodeGenOpt::None) |
| 10655 | return SDValue(); |
| 10656 | switch (N->getOpcode()) { |
| 10657 | case ISD::ADD: |
| 10658 | return performAddCombine(N, DCI); |
| 10659 | case ISD::SUB: |
| 10660 | return performSubCombine(N, DCI); |
| 10661 | case ISD::ADDCARRY: |
| 10662 | case ISD::SUBCARRY: |
| 10663 | return performAddCarrySubCarryCombine(N, DCI); |
| 10664 | case ISD::FADD: |
| 10665 | return performFAddCombine(N, DCI); |
| 10666 | case ISD::FSUB: |
| 10667 | return performFSubCombine(N, DCI); |
| 10668 | case ISD::SETCC: |
| 10669 | return performSetCCCombine(N, DCI); |
| 10670 | case ISD::FMAXNUM: |
| 10671 | case ISD::FMINNUM: |
| 10672 | case ISD::FMAXNUM_IEEE: |
| 10673 | case ISD::FMINNUM_IEEE: |
| 10674 | case ISD::SMAX: |
| 10675 | case ISD::SMIN: |
| 10676 | case ISD::UMAX: |
| 10677 | case ISD::UMIN: |
| 10678 | case AMDGPUISD::FMIN_LEGACY: |
| 10679 | case AMDGPUISD::FMAX_LEGACY: |
| 10680 | return performMinMaxCombine(N, DCI); |
| 10681 | case ISD::FMA: |
| 10682 | return performFMACombine(N, DCI); |
| 10683 | case ISD::AND: |
| 10684 | return performAndCombine(N, DCI); |
| 10685 | case ISD::OR: |
| 10686 | return performOrCombine(N, DCI); |
| 10687 | case ISD::XOR: |
| 10688 | return performXorCombine(N, DCI); |
| 10689 | case ISD::ZERO_EXTEND: |
| 10690 | return performZeroExtendCombine(N, DCI); |
| 10691 | case ISD::SIGN_EXTEND_INREG: |
| 10692 | return performSignExtendInRegCombine(N , DCI); |
| 10693 | case AMDGPUISD::FP_CLASS: |
| 10694 | return performClassCombine(N, DCI); |
| 10695 | case ISD::FCANONICALIZE: |
| 10696 | return performFCanonicalizeCombine(N, DCI); |
| 10697 | case AMDGPUISD::RCP: |
| 10698 | return performRcpCombine(N, DCI); |
| 10699 | case AMDGPUISD::FRACT: |
| 10700 | case AMDGPUISD::RSQ: |
| 10701 | case AMDGPUISD::RCP_LEGACY: |
| 10702 | case AMDGPUISD::RCP_IFLAG: |
| 10703 | case AMDGPUISD::RSQ_CLAMP: |
| 10704 | case AMDGPUISD::LDEXP: { |
| 10705 | // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted |
| 10706 | SDValue Src = N->getOperand(0); |
| 10707 | if (Src.isUndef()) |
| 10708 | return Src; |
| 10709 | break; |
| 10710 | } |
| 10711 | case ISD::SINT_TO_FP: |
| 10712 | case ISD::UINT_TO_FP: |
| 10713 | return performUCharToFloatCombine(N, DCI); |
| 10714 | case AMDGPUISD::CVT_F32_UBYTE0: |
| 10715 | case AMDGPUISD::CVT_F32_UBYTE1: |
| 10716 | case AMDGPUISD::CVT_F32_UBYTE2: |
| 10717 | case AMDGPUISD::CVT_F32_UBYTE3: |
| 10718 | return performCvtF32UByteNCombine(N, DCI); |
| 10719 | case AMDGPUISD::FMED3: |
| 10720 | return performFMed3Combine(N, DCI); |
| 10721 | case AMDGPUISD::CVT_PKRTZ_F16_F32: |
| 10722 | return performCvtPkRTZCombine(N, DCI); |
| 10723 | case AMDGPUISD::CLAMP: |
| 10724 | return performClampCombine(N, DCI); |
| 10725 | case ISD::SCALAR_TO_VECTOR: { |
| 10726 | SelectionDAG &DAG = DCI.DAG; |
| 10727 | EVT VT = N->getValueType(0); |
| 10728 | |
| 10729 | // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x)) |
| 10730 | if (VT == MVT::v2i16 || VT == MVT::v2f16) { |
| 10731 | SDLoc SL(N); |
| 10732 | SDValue Src = N->getOperand(0); |
| 10733 | EVT EltVT = Src.getValueType(); |
| 10734 | if (EltVT == MVT::f16) |
| 10735 | Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); |
| 10736 | |
| 10737 | SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); |
| 10738 | return DAG.getNode(ISD::BITCAST, SL, VT, Ext); |
| 10739 | } |
| 10740 | |
| 10741 | break; |
| 10742 | } |
| 10743 | case ISD::EXTRACT_VECTOR_ELT: |
| 10744 | return performExtractVectorEltCombine(N, DCI); |
| 10745 | case ISD::INSERT_VECTOR_ELT: |
| 10746 | return performInsertVectorEltCombine(N, DCI); |
| 10747 | case ISD::LOAD: { |
    if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
      return Widened;
| 10750 | LLVM_FALLTHROUGH; |
| 10751 | } |
| 10752 | default: { |
| 10753 | if (!DCI.isBeforeLegalize()) { |
| 10754 | if (MemSDNode *MemNode = dyn_cast<MemSDNode>(N)) |
| 10755 | return performMemSDNodeCombine(MemNode, DCI); |
| 10756 | } |
| 10757 | |
| 10758 | break; |
| 10759 | } |
| 10760 | } |
| 10761 | |
| 10762 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
| 10763 | } |
| 10764 | |
| 10765 | /// Helper function for adjustWritemask |
| 10766 | static unsigned SubIdx2Lane(unsigned Idx) { |
| 10767 | switch (Idx) { |
| 10768 | default: return ~0u; |
| 10769 | case AMDGPU::sub0: return 0; |
| 10770 | case AMDGPU::sub1: return 1; |
| 10771 | case AMDGPU::sub2: return 2; |
| 10772 | case AMDGPU::sub3: return 3; |
| 10773 | case AMDGPU::sub4: return 4; // Possible with TFE/LWE |
| 10774 | } |
| 10775 | } |
| 10776 | |
| 10777 | /// Adjust the writemask of MIMG instructions |
| 10778 | SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, |
| 10779 | SelectionDAG &DAG) const { |
| 10780 | unsigned Opcode = Node->getMachineOpcode(); |
| 10781 | |
| 10782 | // Subtract 1 because the vdata output is not a MachineSDNode operand. |
| 10783 | int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1; |
| 10784 | if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx)) |
| 10785 | return Node; // not implemented for D16 |
| 10786 | |
| 10787 | SDNode *Users[5] = { nullptr }; |
| 10788 | unsigned Lane = 0; |
| 10789 | unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1; |
| 10790 | unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); |
| 10791 | unsigned NewDmask = 0; |
| 10792 | unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1; |
| 10793 | unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1; |
  bool UsesTFC = Node->getConstantOperandVal(TFEIdx) ||
                 Node->getConstantOperandVal(LWEIdx);
| 10796 | unsigned TFCLane = 0; |
| 10797 | bool HasChain = Node->getNumValues() > 1; |
| 10798 | |
| 10799 | if (OldDmask == 0) { |
    // These are folded out, but in case it happens don't assert.
| 10801 | return Node; |
| 10802 | } |
| 10803 | |
| 10804 | unsigned OldBitsSet = countPopulation(OldDmask); |
| 10805 | // Work out which is the TFE/LWE lane if that is enabled. |
| 10806 | if (UsesTFC) { |
| 10807 | TFCLane = OldBitsSet; |
| 10808 | } |
| 10809 | |
| 10810 | // Try to figure out the used register components |
| 10811 | for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); |
| 10812 | I != E; ++I) { |
| 10813 | |
| 10814 | // Don't look at users of the chain. |
| 10815 | if (I.getUse().getResNo() != 0) |
| 10816 | continue; |
| 10817 | |
| 10818 | // Abort if we can't understand the usage |
| 10819 | if (!I->isMachineOpcode() || |
| 10820 | I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) |
| 10821 | return Node; |
| 10822 | |
| 10823 | // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used. |
| 10824 | // Note that subregs are packed, i.e. Lane==0 is the first bit set |
| 10825 | // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit |
| 10826 | // set, etc. |
| 10827 | Lane = SubIdx2Lane(I->getConstantOperandVal(1)); |
| 10828 | if (Lane == ~0u) |
| 10829 | return Node; |
| 10830 | |
| 10831 | // Check if the use is for the TFE/LWE generated result at VGPRn+1. |
| 10832 | if (UsesTFC && Lane == TFCLane) { |
| 10833 | Users[Lane] = *I; |
| 10834 | } else { |
| 10835 | // Set which texture component corresponds to the lane. |
| 10836 | unsigned Comp; |
| 10837 | for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) { |
| 10838 | Comp = countTrailingZeros(Dmask); |
| 10839 | Dmask &= ~(1 << Comp); |
| 10840 | } |
| 10841 | |
| 10842 | // Abort if we have more than one user per component. |
| 10843 | if (Users[Lane]) |
| 10844 | return Node; |
| 10845 | |
| 10846 | Users[Lane] = *I; |
| 10847 | NewDmask |= 1 << Comp; |
| 10848 | } |
| 10849 | } |
| 10850 | |
| 10851 | // Don't allow 0 dmask, as hardware assumes one channel enabled. |
| 10852 | bool NoChannels = !NewDmask; |
| 10853 | if (NoChannels) { |
| 10854 | if (!UsesTFC) { |
| 10855 | // No uses of the result and not using TFC. Then do nothing. |
| 10856 | return Node; |
| 10857 | } |
| 10858 | // If the original dmask has one channel - then nothing to do |
| 10859 | if (OldBitsSet == 1) |
| 10860 | return Node; |
| 10861 | // Use an arbitrary dmask - required for the instruction to work |
| 10862 | NewDmask = 1; |
| 10863 | } |
| 10864 | // Abort if there's no change |
| 10865 | if (NewDmask == OldDmask) |
| 10866 | return Node; |
| 10867 | |
| 10868 | unsigned BitsSet = countPopulation(NewDmask); |
| 10869 | |
| 10870 | // Check for TFE or LWE - increase the number of channels by one to account |
| 10871 | // for the extra return value |
| 10872 | // This will need adjustment for D16 if this is also included in |
  // adjustWritemask (this function), but at present D16 is excluded.
| 10874 | unsigned NewChannels = BitsSet + UsesTFC; |
| 10875 | |
| 10876 | int NewOpcode = |
| 10877 | AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels); |
  assert(NewOpcode != -1 &&
         NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
         "failed to find equivalent MIMG op");
| 10881 | |
| 10882 | // Adjust the writemask in the node |
| 10883 | SmallVector<SDValue, 12> Ops; |
| 10884 | Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); |
| 10885 | Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); |
| 10886 | Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); |
| 10887 | |
| 10888 | MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT(); |
| 10889 | |
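  // Round a 3-channel result up to 4 elements and a 5-channel (4 data +
  // TFE/LWE) result up to 8, to match the available register tuple sizes.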
| 10890 | MVT ResultVT = NewChannels == 1 ? |
| 10891 | SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 : |
| 10892 | NewChannels == 5 ? 8 : NewChannels); |
| 10893 | SDVTList NewVTList = HasChain ? |
| 10894 | DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT); |
| 10895 | |
| 10896 | |
| 10897 | MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node), |
| 10898 | NewVTList, Ops); |
| 10899 | |
| 10900 | if (HasChain) { |
| 10901 | // Update chain. |
| 10902 | DAG.setNodeMemRefs(NewNode, Node->memoperands()); |
| 10903 | DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1)); |
| 10904 | } |
| 10905 | |
| 10906 | if (NewChannels == 1) { |
| 10907 | assert(Node->hasNUsesOfValue(1, 0)); |
| 10908 | SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY, |
| 10909 | SDLoc(Node), Users[Lane]->getValueType(0), |
| 10910 | SDValue(NewNode, 0)); |
| 10911 | DAG.ReplaceAllUsesWith(Users[Lane], Copy); |
| 10912 | return nullptr; |
| 10913 | } |
| 10914 | |
| 10915 | // Update the users of the node with the new indices |
| 10916 | for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) { |
| 10917 | SDNode *User = Users[i]; |
| 10918 | if (!User) { |
| 10919 | // Handle the special case of NoChannels. We set NewDmask to 1 above, but |
| 10920 | // Users[0] is still nullptr because channel 0 doesn't really have a use. |
| 10921 | if (i || !NoChannels) |
| 10922 | continue; |
| 10923 | } else { |
| 10924 | SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); |
| 10925 | DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op); |
| 10926 | } |
| 10927 | |
| 10928 | switch (Idx) { |
| 10929 | default: break; |
| 10930 | case AMDGPU::sub0: Idx = AMDGPU::sub1; break; |
| 10931 | case AMDGPU::sub1: Idx = AMDGPU::sub2; break; |
| 10932 | case AMDGPU::sub2: Idx = AMDGPU::sub3; break; |
| 10933 | case AMDGPU::sub3: Idx = AMDGPU::sub4; break; |
| 10934 | } |
| 10935 | } |
| 10936 | |
| 10937 | DAG.RemoveDeadNode(Node); |
| 10938 | return nullptr; |
| 10939 | } |
| 10940 | |
| 10941 | static bool isFrameIndexOp(SDValue Op) { |
| 10942 | if (Op.getOpcode() == ISD::AssertZext) |
| 10943 | Op = Op.getOperand(0); |
| 10944 | |
| 10945 | return isa<FrameIndexSDNode>(Op); |
| 10946 | } |
| 10947 | |
| 10948 | /// Legalize target independent instructions (e.g. INSERT_SUBREG) |
| 10949 | /// with frame index operands. |
/// LLVM assumes that inputs to these instructions are registers.
| 10951 | SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, |
| 10952 | SelectionDAG &DAG) const { |
| 10953 | if (Node->getOpcode() == ISD::CopyToReg) { |
| 10954 | RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1)); |
| 10955 | SDValue SrcVal = Node->getOperand(2); |
| 10956 | |
| 10957 | // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have |
| 10958 | // to try understanding copies to physical registers. |
| 10959 | if (SrcVal.getValueType() == MVT::i1 && DestReg->getReg().isPhysical()) { |
| 10960 | SDLoc SL(Node); |
| 10961 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
| 10962 | SDValue VReg = DAG.getRegister( |
| 10963 | MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1); |
| 10964 | |
| 10965 | SDNode *Glued = Node->getGluedNode(); |
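      // Thread through the incoming glue, if any; SDValue(nullptr, 0) is a
      // null glue operand when there is no glued node.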
| 10966 | SDValue ToVReg |
| 10967 | = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal, |
| 10968 | SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0)); |
| 10969 | SDValue ToResultReg |
| 10970 | = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0), |
| 10971 | VReg, ToVReg.getValue(1)); |
| 10972 | DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode()); |
| 10973 | DAG.RemoveDeadNode(Node); |
| 10974 | return ToResultReg.getNode(); |
| 10975 | } |
| 10976 | } |
| 10977 | |
| 10978 | SmallVector<SDValue, 8> Ops; |
| 10979 | for (unsigned i = 0; i < Node->getNumOperands(); ++i) { |
| 10980 | if (!isFrameIndexOp(Node->getOperand(i))) { |
| 10981 | Ops.push_back(Node->getOperand(i)); |
| 10982 | continue; |
| 10983 | } |
| 10984 | |
| 10985 | SDLoc DL(Node); |
| 10986 | Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, |
| 10987 | Node->getOperand(i).getValueType(), |
| 10988 | Node->getOperand(i)), 0)); |
| 10989 | } |
| 10990 | |
| 10991 | return DAG.UpdateNodeOperands(Node, Ops); |
| 10992 | } |
| 10993 | |
| 10994 | /// Fold the instructions after selecting them. |
| 10995 | /// Returns null if users were already updated. |
| 10996 | SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, |
| 10997 | SelectionDAG &DAG) const { |
| 10998 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 10999 | unsigned Opcode = Node->getMachineOpcode(); |
| 11000 | |
| 11001 | if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && |
| 11002 | !TII->isGather4(Opcode) && |
| 11003 | AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) != -1) { |
| 11004 | return adjustWritemask(Node, DAG); |
| 11005 | } |
| 11006 | |
| 11007 | if (Opcode == AMDGPU::INSERT_SUBREG || |
| 11008 | Opcode == AMDGPU::REG_SEQUENCE) { |
| 11009 | legalizeTargetIndependentNode(Node, DAG); |
| 11010 | return Node; |
| 11011 | } |
| 11012 | |
| 11013 | switch (Opcode) { |
| 11014 | case AMDGPU::V_DIV_SCALE_F32_e64: |
| 11015 | case AMDGPU::V_DIV_SCALE_F64_e64: { |
| 11016 | // Satisfy the operand register constraint when one of the inputs is |
| 11017 | // undefined. Ordinarily each undef value will have its own implicit_def of |
| 11018 | // a vreg, so force these to use a single register. |
| 11019 | SDValue Src0 = Node->getOperand(1); |
| 11020 | SDValue Src1 = Node->getOperand(3); |
| 11021 | SDValue Src2 = Node->getOperand(5); |
| 11022 | |
| 11023 | if ((Src0.isMachineOpcode() && |
| 11024 | Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) && |
| 11025 | (Src0 == Src1 || Src0 == Src2)) |
| 11026 | break; |
| 11027 | |
| 11028 | MVT VT = Src0.getValueType().getSimpleVT(); |
| 11029 | const TargetRegisterClass *RC = |
| 11030 | getRegClassFor(VT, Src0.getNode()->isDivergent()); |
| 11031 | |
| 11032 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
| 11033 | SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT); |
| 11034 | |
| 11035 | SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node), |
| 11036 | UndefReg, Src0, SDValue()); |
| 11037 | |
| 11038 | // src0 must be the same register as src1 or src2, even if the value is |
| 11039 | // undefined, so make sure we don't violate this constraint. |
| 11040 | if (Src0.isMachineOpcode() && |
| 11041 | Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) { |
| 11042 | if (Src1.isMachineOpcode() && |
| 11043 | Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) |
| 11044 | Src0 = Src1; |
| 11045 | else if (Src2.isMachineOpcode() && |
| 11046 | Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) |
| 11047 | Src0 = Src2; |
| 11048 | else { |
| 11049 | assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF); |
| 11050 | Src0 = UndefReg; |
| 11051 | Src1 = UndefReg; |
| 11052 | } |
| 11053 | } else |
| 11054 | break; |
| 11055 | |
| 11056 | SmallVector<SDValue, 9> Ops(Node->op_begin(), Node->op_end()); |
| 11057 | Ops[1] = Src0; |
| 11058 | Ops[3] = Src1; |
| 11059 | Ops[5] = Src2; |
| 11060 | Ops.push_back(ImpDef.getValue(1)); |
| 11061 | return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops); |
| 11062 | } |
| 11063 | default: |
| 11064 | break; |
| 11065 | } |
| 11066 | |
| 11067 | return Node; |
| 11068 | } |
| 11069 | |
| 11070 | /// Assign the register class depending on the number of |
| 11071 | /// bits set in the writemask |
| 11072 | void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
| 11073 | SDNode *Node) const { |
| 11074 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 11075 | |
| 11076 | MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
| 11077 | |
| 11078 | if (TII->isVOP3(MI.getOpcode())) { |
| 11079 | // Make sure constant bus requirements are respected. |
| 11080 | TII->legalizeOperandsVOP3(MRI, MI); |
| 11081 | |
| 11082 | // Prefer VGPRs over AGPRs in mAI instructions where possible. |
    // This saves a chain-copy of registers and better balances register
    // use between vgprs and agprs, as agpr tuples tend to be big.
| 11085 | if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) { |
| 11086 | unsigned Opc = MI.getOpcode(); |
| 11087 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 11088 | for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), |
| 11089 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) { |
| 11090 | if (I == -1) |
| 11091 | break; |
| 11092 | MachineOperand &Op = MI.getOperand(I); |
| 11093 | if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID && |
| 11094 | OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) || |
| 11095 | !Op.getReg().isVirtual() || !TRI->isAGPR(MRI, Op.getReg())) |
| 11096 | continue; |
| 11097 | auto *Src = MRI.getUniqueVRegDef(Op.getReg()); |
| 11098 | if (!Src || !Src->isCopy() || |
| 11099 | !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg())) |
| 11100 | continue; |
| 11101 | auto *RC = TRI->getRegClassForReg(MRI, Op.getReg()); |
| 11102 | auto *NewRC = TRI->getEquivalentVGPRClass(RC); |
| 11103 | // All uses of agpr64 and agpr32 can also accept vgpr except for |
| 11104 | // v_accvgpr_read, but we do not produce agpr reads during selection, |
| 11105 | // so no use checks are needed. |
| 11106 | MRI.setRegClass(Op.getReg(), NewRC); |
| 11107 | } |
| 11108 | } |
| 11109 | |
| 11110 | return; |
| 11111 | } |
| 11112 | |
| 11113 | // Replace unused atomics with the no return version. |
| 11114 | int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); |
| 11115 | if (NoRetAtomicOp != -1) { |
| 11116 | if (!Node->hasAnyUseOfValue(0)) { |
| 11117 | int Glc1Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), |
| 11118 | AMDGPU::OpName::glc1); |
| 11119 | if (Glc1Idx != -1) |
| 11120 | MI.RemoveOperand(Glc1Idx); |
| 11121 | MI.RemoveOperand(0); |
| 11122 | MI.setDesc(TII->get(NoRetAtomicOp)); |
| 11123 | return; |
| 11124 | } |
| 11125 | |
| 11126 | // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg |
| 11127 | // instruction, because the return type of these instructions is a vec2 of |
| 11128 | // the memory type, so it can be tied to the input operand. |
| 11129 | // This means these instructions always have a use, so we need to add a |
| 11130 | // special case to check if the atomic has only one extract_subreg use, |
| 11131 | // which itself has no uses. |
| 11132 | if ((Node->hasNUsesOfValue(1, 0) && |
| 11133 | Node->use_begin()->isMachineOpcode() && |
| 11134 | Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && |
| 11135 | !Node->use_begin()->hasAnyUseOfValue(0))) { |
| 11136 | Register Def = MI.getOperand(0).getReg(); |
| 11137 | |
| 11138 | // Change this into a noret atomic. |
| 11139 | MI.setDesc(TII->get(NoRetAtomicOp)); |
| 11140 | MI.RemoveOperand(0); |
| 11141 | |
| 11142 | // If we only remove the def operand from the atomic instruction, the |
| 11143 | // extract_subreg will be left with a use of a vreg without a def. |
| 11144 | // So we need to insert an implicit_def to avoid machine verifier |
| 11145 | // errors. |
| 11146 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), |
| 11147 | TII->get(AMDGPU::IMPLICIT_DEF), Def); |
| 11148 | } |
| 11149 | return; |
| 11150 | } |
| 11151 | } |
| 11152 | |
| 11153 | static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, |
| 11154 | uint64_t Val) { |
| 11155 | SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); |
| 11156 | return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); |
| 11157 | } |
| 11158 | |
| 11159 | MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, |
| 11160 | const SDLoc &DL, |
| 11161 | SDValue Ptr) const { |
| 11162 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
| 11163 | |
  // Build the half of the register holding the constants before building
  // the full 128-bit register. If we are building multiple resource
  // descriptors, this will allow CSEing of the 2-component register.
| 11167 | const SDValue Ops0[] = { |
| 11168 | DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), |
| 11169 | buildSMovImm32(DAG, DL, 0), |
| 11170 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
| 11171 | buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), |
| 11172 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) |
| 11173 | }; |
| 11174 | |
| 11175 | SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, |
| 11176 | MVT::v2i32, Ops0), 0); |
| 11177 | |
| 11178 | // Combine the constants and the pointer. |
| 11179 | const SDValue Ops1[] = { |
| 11180 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), |
| 11181 | Ptr, |
| 11182 | DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), |
| 11183 | SubRegHi, |
| 11184 | DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) |
| 11185 | }; |
| 11186 | |
| 11187 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); |
| 11188 | } |
| 11189 | |
| 11190 | /// Return a resource descriptor with the 'Add TID' bit enabled |
| 11191 | /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] |
| 11192 | /// of the resource descriptor) to create an offset, which is added to |
| 11193 | /// the resource pointer. |
| 11194 | MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, |
| 11195 | SDValue Ptr, uint32_t RsrcDword1, |
| 11196 | uint64_t RsrcDword2And3) const { |
| 11197 | SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); |
| 11198 | SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); |
| 11199 | if (RsrcDword1) { |
| 11200 | PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, |
| 11201 | DAG.getConstant(RsrcDword1, DL, MVT::i32)), |
| 11202 | 0); |
| 11203 | } |
| 11204 | |
| 11205 | SDValue DataLo = buildSMovImm32(DAG, DL, |
| 11206 | RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); |
| 11207 | SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); |
| 11208 | |
| 11209 | const SDValue Ops[] = { |
| 11210 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), |
| 11211 | PtrLo, |
| 11212 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
| 11213 | PtrHi, |
| 11214 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), |
| 11215 | DataLo, |
| 11216 | DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), |
| 11217 | DataHi, |
| 11218 | DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) |
| 11219 | }; |
| 11220 | |
| 11221 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); |
| 11222 | } |
| 11223 | |
| 11224 | //===----------------------------------------------------------------------===// |
| 11225 | // SI Inline Assembly Support |
| 11226 | //===----------------------------------------------------------------------===// |
| 11227 | |
| 11228 | std::pair<unsigned, const TargetRegisterClass *> |
| 11229 | SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, |
| 11230 | StringRef Constraint, |
| 11231 | MVT VT) const { |
| 11232 | const TargetRegisterClass *RC = nullptr; |
| 11233 | if (Constraint.size() == 1) { |
| 11234 | const unsigned BitWidth = VT.getSizeInBits(); |
| 11235 | switch (Constraint[0]) { |
| 11236 | default: |
| 11237 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
| 11238 | case 's': |
| 11239 | case 'r': |
| 11240 | switch (BitWidth) { |
| 11241 | case 16: |
| 11242 | RC = &AMDGPU::SReg_32RegClass; |
| 11243 | break; |
| 11244 | case 64: |
| 11245 | RC = &AMDGPU::SGPR_64RegClass; |
| 11246 | break; |
| 11247 | default: |
| 11248 | RC = SIRegisterInfo::getSGPRClassForBitWidth(BitWidth); |
| 11249 | if (!RC) |
| 11250 | return std::make_pair(0U, nullptr); |
| 11251 | break; |
| 11252 | } |
| 11253 | break; |
| 11254 | case 'v': |
| 11255 | switch (BitWidth) { |
| 11256 | case 16: |
| 11257 | RC = &AMDGPU::VGPR_32RegClass; |
| 11258 | break; |
| 11259 | default: |
| 11260 | RC = SIRegisterInfo::getVGPRClassForBitWidth(BitWidth); |
| 11261 | if (!RC) |
| 11262 | return std::make_pair(0U, nullptr); |
| 11263 | break; |
| 11264 | } |
| 11265 | break; |
| 11266 | case 'a': |
| 11267 | if (!Subtarget->hasMAIInsts()) |
| 11268 | break; |
| 11269 | switch (BitWidth) { |
| 11270 | case 16: |
| 11271 | RC = &AMDGPU::AGPR_32RegClass; |
| 11272 | break; |
| 11273 | default: |
| 11274 | RC = SIRegisterInfo::getAGPRClassForBitWidth(BitWidth); |
| 11275 | if (!RC) |
| 11276 | return std::make_pair(0U, nullptr); |
| 11277 | break; |
| 11278 | } |
| 11279 | break; |
| 11280 | } |
| 11281 | // We actually support i128, i16 and f16 as inline parameters |
| 11282 | // even if they are not reported as legal |
| 11283 | if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 || |
| 11284 | VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16)) |
| 11285 | return std::make_pair(0U, RC); |
| 11286 | } |
| 11287 | |
| 11288 | if (Constraint.size() > 1) { |
| 11289 | if (Constraint[1] == 'v') { |
| 11290 | RC = &AMDGPU::VGPR_32RegClass; |
| 11291 | } else if (Constraint[1] == 's') { |
| 11292 | RC = &AMDGPU::SGPR_32RegClass; |
| 11293 | } else if (Constraint[1] == 'a') { |
| 11294 | RC = &AMDGPU::AGPR_32RegClass; |
| 11295 | } |
| 11296 | |
| 11297 | if (RC) { |
| 11298 | uint32_t Idx; |
| 11299 | bool Failed = Constraint.substr(2).getAsInteger(10, Idx); |
| 11300 | if (!Failed && Idx < RC->getNumRegs()) |
| 11301 | return std::make_pair(RC->getRegister(Idx), RC); |
| 11302 | } |
| 11303 | } |
| 11304 | |
| 11305 | // FIXME: Returns VS_32 for physical SGPR constraints |
| 11306 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
| 11307 | } |
| 11308 | |
| 11309 | static bool isImmConstraint(StringRef Constraint) { |
| 11310 | if (Constraint.size() == 1) { |
| 11311 | switch (Constraint[0]) { |
| 11312 | default: break; |
| 11313 | case 'I': |
| 11314 | case 'J': |
| 11315 | case 'A': |
| 11316 | case 'B': |
| 11317 | case 'C': |
| 11318 | return true; |
| 11319 | } |
  } else if (Constraint == "DA" ||
             Constraint == "DB") {
| 11322 | return true; |
| 11323 | } |
| 11324 | return false; |
| 11325 | } |
| 11326 | |
| 11327 | SITargetLowering::ConstraintType |
| 11328 | SITargetLowering::getConstraintType(StringRef Constraint) const { |
| 11329 | if (Constraint.size() == 1) { |
| 11330 | switch (Constraint[0]) { |
| 11331 | default: break; |
| 11332 | case 's': |
| 11333 | case 'v': |
| 11334 | case 'a': |
| 11335 | return C_RegisterClass; |
| 11336 | } |
| 11337 | } |
| 11338 | if (isImmConstraint(Constraint)) { |
| 11339 | return C_Other; |
| 11340 | } |
| 11341 | return TargetLowering::getConstraintType(Constraint); |
| 11342 | } |
| 11343 | |
| 11344 | static uint64_t clearUnusedBits(uint64_t Val, unsigned Size) { |
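  // Keep inlinable integer literals as-is so they still sign-extend
  // correctly; truncate everything else to the operand's width.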
| 11345 | if (!AMDGPU::isInlinableIntLiteral(Val)) { |
| 11346 | Val = Val & maskTrailingOnes<uint64_t>(Size); |
| 11347 | } |
| 11348 | return Val; |
| 11349 | } |
| 11350 | |
| 11351 | void SITargetLowering::LowerAsmOperandForConstraint(SDValue Op, |
| 11352 | std::string &Constraint, |
| 11353 | std::vector<SDValue> &Ops, |
| 11354 | SelectionDAG &DAG) const { |
| 11355 | if (isImmConstraint(Constraint)) { |
| 11356 | uint64_t Val; |
| 11357 | if (getAsmOperandConstVal(Op, Val) && |
| 11358 | checkAsmConstraintVal(Op, Constraint, Val)) { |
| 11359 | Val = clearUnusedBits(Val, Op.getScalarValueSizeInBits()); |
| 11360 | Ops.push_back(DAG.getTargetConstant(Val, SDLoc(Op), MVT::i64)); |
| 11361 | } |
| 11362 | } else { |
| 11363 | TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
| 11364 | } |
| 11365 | } |
| 11366 | |
| 11367 | bool SITargetLowering::getAsmOperandConstVal(SDValue Op, uint64_t &Val) const { |
| 11368 | unsigned Size = Op.getScalarValueSizeInBits(); |
| 11369 | if (Size > 64) |
| 11370 | return false; |
| 11371 | |
| 11372 | if (Size == 16 && !Subtarget->has16BitInsts()) |
| 11373 | return false; |
| 11374 | |
| 11375 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { |
| 11376 | Val = C->getSExtValue(); |
| 11377 | return true; |
| 11378 | } |
| 11379 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { |
| 11380 | Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); |
| 11381 | return true; |
| 11382 | } |
| 11383 | if (BuildVectorSDNode *V = dyn_cast<BuildVectorSDNode>(Op)) { |
| 11384 | if (Size != 16 || Op.getNumOperands() != 2) |
| 11385 | return false; |
| 11386 | if (Op.getOperand(0).isUndef() || Op.getOperand(1).isUndef()) |
| 11387 | return false; |
| 11388 | if (ConstantSDNode *C = V->getConstantSplatNode()) { |
| 11389 | Val = C->getSExtValue(); |
| 11390 | return true; |
| 11391 | } |
| 11392 | if (ConstantFPSDNode *C = V->getConstantFPSplatNode()) { |
| 11393 | Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); |
| 11394 | return true; |
| 11395 | } |
| 11396 | } |
| 11397 | |
| 11398 | return false; |
| 11399 | } |
| 11400 | |
| 11401 | bool SITargetLowering::checkAsmConstraintVal(SDValue Op, |
| 11402 | const std::string &Constraint, |
| 11403 | uint64_t Val) const { |
| 11404 | if (Constraint.size() == 1) { |
| 11405 | switch (Constraint[0]) { |
| 11406 | case 'I': |
| 11407 | return AMDGPU::isInlinableIntLiteral(Val); |
| 11408 | case 'J': |
| 11409 | return isInt<16>(Val); |
| 11410 | case 'A': |
| 11411 | return checkAsmConstraintValA(Op, Val); |
| 11412 | case 'B': |
| 11413 | return isInt<32>(Val); |
| 11414 | case 'C': |
| 11415 | return isUInt<32>(clearUnusedBits(Val, Op.getScalarValueSizeInBits())) || |
| 11416 | AMDGPU::isInlinableIntLiteral(Val); |
| 11417 | default: |
| 11418 | break; |
| 11419 | } |
| 11420 | } else if (Constraint.size() == 2) { |
    if (Constraint == "DA") {
| 11422 | int64_t HiBits = static_cast<int32_t>(Val >> 32); |
| 11423 | int64_t LoBits = static_cast<int32_t>(Val); |
| 11424 | return checkAsmConstraintValA(Op, HiBits, 32) && |
| 11425 | checkAsmConstraintValA(Op, LoBits, 32); |
| 11426 | } |
    if (Constraint == "DB") {
| 11428 | return true; |
| 11429 | } |
| 11430 | } |
  llvm_unreachable("Invalid asm constraint");
| 11432 | } |
| 11433 | |
| 11434 | bool SITargetLowering::checkAsmConstraintValA(SDValue Op, |
| 11435 | uint64_t Val, |
| 11436 | unsigned MaxSize) const { |
| 11437 | unsigned Size = std::min<unsigned>(Op.getScalarValueSizeInBits(), MaxSize); |
| 11438 | bool HasInv2Pi = Subtarget->hasInv2PiInlineImm(); |
| 11439 | if ((Size == 16 && AMDGPU::isInlinableLiteral16(Val, HasInv2Pi)) || |
| 11440 | (Size == 32 && AMDGPU::isInlinableLiteral32(Val, HasInv2Pi)) || |
| 11441 | (Size == 64 && AMDGPU::isInlinableLiteral64(Val, HasInv2Pi))) { |
| 11442 | return true; |
| 11443 | } |
| 11444 | return false; |
| 11445 | } |
| 11446 | |
| 11447 | // Figure out which registers should be reserved for stack access. Only after |
| 11448 | // the function is legalized do we know all of the non-spill stack objects or if |
| 11449 | // calls are present. |
| 11450 | void SITargetLowering::finalizeLowering(MachineFunction &MF) const { |
| 11451 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 11452 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 11453 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
| 11454 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 11455 | |
| 11456 | if (Info->isEntryFunction()) { |
| 11457 | // Callable functions have fixed registers used for stack access. |
| 11458 | reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info); |
| 11459 | } |
| 11460 | |
| 11461 | assert(!TRI->isSubRegister(Info->getScratchRSrcReg(), |
| 11462 | Info->getStackPtrOffsetReg())); |
| 11463 | if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG) |
| 11464 | MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg()); |
| 11465 | |
| 11466 | // We need to worry about replacing the default register with itself in case |
| 11467 | // of MIR testcases missing the MFI. |
| 11468 | if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG) |
| 11469 | MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg()); |
| 11470 | |
| 11471 | if (Info->getFrameOffsetReg() != AMDGPU::FP_REG) |
| 11472 | MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg()); |
| 11473 | |
| 11474 | Info->limitOccupancy(MF); |
| 11475 | |
| 11476 | if (ST.isWave32() && !MF.empty()) { |
| 11477 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 11478 | for (auto &MBB : MF) { |
| 11479 | for (auto &MI : MBB) { |
| 11480 | TII->fixImplicitOperands(MI); |
| 11481 | } |
| 11482 | } |
| 11483 | } |
| 11484 | |
| 11485 | TargetLoweringBase::finalizeLowering(MF); |
| 11486 | |
| 11487 | // Allocate a VGPR for future SGPR Spill if |
| 11488 | // "amdgpu-reserve-vgpr-for-sgpr-spill" option is used |
| 11489 | // FIXME: We won't need this hack if we split SGPR allocation from VGPR |
| 11490 | if (VGPRReserveforSGPRSpill && !Info->VGPRReservedForSGPRSpill && |
| 11491 | !Info->isEntryFunction() && MF.getFrameInfo().hasStackObjects()) |
| 11492 | Info->reserveVGPRforSGPRSpills(MF); |
| 11493 | } |
| 11494 | |
| 11495 | void SITargetLowering::computeKnownBitsForFrameIndex( |
| 11496 | const int FI, KnownBits &Known, const MachineFunction &MF) const { |
| 11497 | TargetLowering::computeKnownBitsForFrameIndex(FI, Known, MF); |
| 11498 | |
| 11499 | // Set the high bits to zero based on the maximum allowed scratch size per |
| 11500 | // wave. We can't use vaddr in MUBUF instructions if we don't know the address |
| 11501 | // calculation won't overflow, so assume the sign bit is never set. |
| 11502 | Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); |
| 11503 | } |
| 11504 | |
| 11505 | static void knownBitsForWorkitemID(const GCNSubtarget &ST, GISelKnownBits &KB, |
| 11506 | KnownBits &Known, unsigned Dim) { |
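  // The workitem ID in this dimension is at most MaxValue, so all bits
  // above the highest possible set bit are known zero.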
| 11507 | unsigned MaxValue = |
| 11508 | ST.getMaxWorkitemID(KB.getMachineFunction().getFunction(), Dim); |
| 11509 | Known.Zero.setHighBits(countLeadingZeros(MaxValue)); |
| 11510 | } |
| 11511 | |
| 11512 | void SITargetLowering::computeKnownBitsForTargetInstr( |
| 11513 | GISelKnownBits &KB, Register R, KnownBits &Known, const APInt &DemandedElts, |
| 11514 | const MachineRegisterInfo &MRI, unsigned Depth) const { |
| 11515 | const MachineInstr *MI = MRI.getVRegDef(R); |
| 11516 | switch (MI->getOpcode()) { |
| 11517 | case AMDGPU::G_INTRINSIC: { |
| 11518 | switch (MI->getIntrinsicID()) { |
| 11519 | case Intrinsic::amdgcn_workitem_id_x: |
| 11520 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0); |
| 11521 | break; |
| 11522 | case Intrinsic::amdgcn_workitem_id_y: |
| 11523 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 1); |
| 11524 | break; |
| 11525 | case Intrinsic::amdgcn_workitem_id_z: |
| 11526 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 2); |
| 11527 | break; |
| 11528 | case Intrinsic::amdgcn_mbcnt_lo: |
| 11529 | case Intrinsic::amdgcn_mbcnt_hi: { |
| 11530 | // These return at most the wavefront size - 1. |
| 11531 | unsigned Size = MRI.getType(R).getSizeInBits(); |
| 11532 | Known.Zero.setHighBits(Size - getSubtarget()->getWavefrontSizeLog2()); |
| 11533 | break; |
| 11534 | } |
| 11535 | case Intrinsic::amdgcn_groupstaticsize: { |
| 11536 | // We can report everything over the maximum size as 0. We can't report |
| 11537 | // based on the actual size because we don't know if it's accurate or not |
| 11538 | // at any given point. |
| 11539 | Known.Zero.setHighBits(countLeadingZeros(getSubtarget()->getLocalMemorySize())); |
| 11540 | break; |
| 11541 | } |
| 11542 | } |
| 11543 | break; |
| 11544 | } |
| 11545 | case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE: |
| 11546 | Known.Zero.setHighBits(24); |
| 11547 | break; |
| 11548 | case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT: |
| 11549 | Known.Zero.setHighBits(16); |
| 11550 | break; |
| 11551 | } |
| 11552 | } |
| 11553 | |
| 11554 | Align SITargetLowering::computeKnownAlignForTargetInstr( |
| 11555 | GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI, |
| 11556 | unsigned Depth) const { |
| 11557 | const MachineInstr *MI = MRI.getVRegDef(R); |
| 11558 | switch (MI->getOpcode()) { |
| 11559 | case AMDGPU::G_INTRINSIC: |
| 11560 | case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { |
| 11561 | // FIXME: Can this move to generic code? What about the case where the call |
| 11562 | // site specifies a lower alignment? |
| 11563 | Intrinsic::ID IID = MI->getIntrinsicID(); |
| 11564 | LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext(); |
| 11565 | AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID); |
| 11566 | if (MaybeAlign RetAlign = Attrs.getRetAlignment()) |
| 11567 | return *RetAlign; |
| 11568 | return Align(1); |
| 11569 | } |
| 11570 | default: |
| 11571 | return Align(1); |
| 11572 | } |
| 11573 | } |
| 11574 | |
| 11575 | Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { |
| 11576 | const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML); |
| 11577 | const Align CacheLineAlign = Align(64); |
| 11578 | |
  // Pre-GFX10 targets did not benefit from loop alignment.
| 11580 | if (!ML || DisableLoopAlignment || |
| 11581 | (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) || |
| 11582 | getSubtarget()->hasInstFwdPrefetchBug()) |
| 11583 | return PrefAlign; |
| 11584 | |
  // On GFX10 the I$ consists of 4 x 64-byte cache lines.
  // By default the prefetcher keeps one cache line behind and reads two
  // ahead. We can modify it with S_INST_PREFETCH so that larger loops keep
  // two lines behind and one ahead.
  // Therefore we can benefit from aligning loop headers if the loop fits in
  // 192 bytes.
  // If the loop fits in 64 bytes it always spans no more than two cache
  // lines and does not need alignment.
  // Else if the loop is at most 128 bytes we do not need to modify the
  // prefetch settings. Else if the loop is at most 192 bytes we need two
  // lines behind.
| 11594 | |
| 11595 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
  const MachineBasicBlock *Header = ML->getHeader();
| 11597 | if (Header->getAlignment() != PrefAlign) |
| 11598 | return Header->getAlignment(); // Already processed. |
| 11599 | |
| 11600 | unsigned LoopSize = 0; |
| 11601 | for (const MachineBasicBlock *MBB : ML->blocks()) { |
    // If an inner loop block is aligned, assume on average half of the
    // alignment size is added as nops.
| 11604 | if (MBB != Header) |
| 11605 | LoopSize += MBB->getAlignment().value() / 2; |
| 11606 | |
| 11607 | for (const MachineInstr &MI : *MBB) { |
| 11608 | LoopSize += TII->getInstSizeInBytes(MI); |
| 11609 | if (LoopSize > 192) |
| 11610 | return PrefAlign; |
| 11611 | } |
| 11612 | } |
| 11613 | |
| 11614 | if (LoopSize <= 64) |
| 11615 | return PrefAlign; |
| 11616 | |
| 11617 | if (LoopSize <= 128) |
| 11618 | return CacheLineAlign; |
| 11619 | |
  // If any of the parent loops is surrounded by prefetch instructions, do
  // not insert new ones for the inner loop, as that would reset the
  // parent's settings.
| 11622 | for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) { |
| 11623 | if (MachineBasicBlock *Exit = P->getExitBlock()) { |
| 11624 | auto I = Exit->getFirstNonDebugInstr(); |
| 11625 | if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) |
| 11626 | return CacheLineAlign; |
| 11627 | } |
| 11628 | } |
| 11629 | |
| 11630 | MachineBasicBlock *Pre = ML->getLoopPreheader(); |
| 11631 | MachineBasicBlock *Exit = ML->getExitBlock(); |
| 11632 | |
| 11633 | if (Pre && Exit) { |
| 11634 | BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(), |
| 11635 | TII->get(AMDGPU::S_INST_PREFETCH)) |
| 11636 | .addImm(1); // prefetch 2 lines behind PC |
| 11637 | |
| 11638 | BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(), |
| 11639 | TII->get(AMDGPU::S_INST_PREFETCH)) |
| 11640 | .addImm(2); // prefetch 1 line behind PC |
| 11641 | } |
| 11642 | |
| 11643 | return CacheLineAlign; |
| 11644 | } |
| 11645 | |
| 11646 | LLVM_ATTRIBUTE_UNUSED |
| 11647 | static bool isCopyFromRegOfInlineAsm(const SDNode *N) { |
| 11648 | assert(N->getOpcode() == ISD::CopyFromReg); |
| 11649 | do { |
| 11650 | // Follow the chain until we find an INLINEASM node. |
| 11651 | N = N->getOperand(0).getNode(); |
| 11652 | if (N->getOpcode() == ISD::INLINEASM || |
| 11653 | N->getOpcode() == ISD::INLINEASM_BR) |
| 11654 | return true; |
| 11655 | } while (N->getOpcode() == ISD::CopyFromReg); |
| 11656 | return false; |
| 11657 | } |
| 11658 | |
| 11659 | bool SITargetLowering::isSDNodeSourceOfDivergence( |
| 11660 | const SDNode *N, FunctionLoweringInfo *FLI, |
| 11661 | LegacyDivergenceAnalysis *KDA) const { |
| 11662 | switch (N->getOpcode()) { |
| 11663 | case ISD::CopyFromReg: { |
| 11664 | const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1)); |
| 11665 | const MachineRegisterInfo &MRI = FLI->MF->getRegInfo(); |
| 11666 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 11667 | Register Reg = R->getReg(); |
| 11668 | |
| 11669 | // FIXME: Why does this need to consider isLiveIn? |
| 11670 | if (Reg.isPhysical() || MRI.isLiveIn(Reg)) |
| 11671 | return !TRI->isSGPRReg(MRI, Reg); |
| 11672 | |
| 11673 | if (const Value *V = FLI->getValueFromVirtualReg(R->getReg())) |
| 11674 | return KDA->isDivergent(V); |
| 11675 | |
| 11676 | assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N)); |
| 11677 | return !TRI->isSGPRReg(MRI, Reg); |
| 11678 | } |
| 11679 | case ISD::LOAD: { |
| 11680 | const LoadSDNode *L = cast<LoadSDNode>(N); |
| 11681 | unsigned AS = L->getAddressSpace(); |
| 11682 | // A flat load may access private memory. |
| 11683 | return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS; |
| 11684 | } |
| 11685 | case ISD::CALLSEQ_END: |
| 11686 | return true; |
| 11687 | case ISD::INTRINSIC_WO_CHAIN: |
| 11688 | return AMDGPU::isIntrinsicSourceOfDivergence( |
| 11689 | cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()); |
| 11690 | case ISD::INTRINSIC_W_CHAIN: |
| 11691 | return AMDGPU::isIntrinsicSourceOfDivergence( |
| 11692 | cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()); |
| 11693 | } |
| 11694 | return false; |
| 11695 | } |
| 11696 | |
| 11697 | bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG, |
| 11698 | EVT VT) const { |
| 11699 | switch (VT.getScalarType().getSimpleVT().SimpleTy) { |
| 11700 | case MVT::f32: |
| 11701 | return hasFP32Denormals(DAG.getMachineFunction()); |
| 11702 | case MVT::f64: |
| 11703 | case MVT::f16: |
| 11704 | return hasFP64FP16Denormals(DAG.getMachineFunction()); |
| 11705 | default: |
| 11706 | return false; |
| 11707 | } |
| 11708 | } |
| 11709 | |
| 11710 | bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, |
| 11711 | const SelectionDAG &DAG, |
| 11712 | bool SNaN, |
| 11713 | unsigned Depth) const { |
| 11714 | if (Op.getOpcode() == AMDGPUISD::CLAMP) { |
| 11715 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 11716 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 11717 | |
| 11718 | if (Info->getMode().DX10Clamp) |
| 11719 | return true; // Clamped to 0. |
| 11720 | return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); |
| 11721 | } |
| 11722 | |
| 11723 | return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG, |
| 11724 | SNaN, Depth); |
| 11725 | } |
| 11726 | |
// Global FP atomic instructions have a hardcoded FP mode: they do not support
// FP32 denormals and only support v2f16 denormals.
| 11729 | static bool fpModeMatchesGlobalFPAtomicMode(const AtomicRMWInst *RMW) { |
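  // f32 atomics require the flush-to-zero (preserve-sign) mode to match the
  // hardware behavior described above; the other types keep denormals, so
  // they require IEEE mode.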
| 11730 | const fltSemantics &Flt = RMW->getType()->getScalarType()->getFltSemantics(); |
| 11731 | auto DenormMode = RMW->getParent()->getParent()->getDenormalMode(Flt); |
| 11732 | if (&Flt == &APFloat::IEEEsingle()) |
| 11733 | return DenormMode == DenormalMode::getPreserveSign(); |
| 11734 | return DenormMode == DenormalMode::getIEEE(); |
| 11735 | } |
| 11736 | |
| 11737 | TargetLowering::AtomicExpansionKind |
| 11738 | SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { |
| 11739 | switch (RMW->getOperation()) { |
| 11740 | case AtomicRMWInst::FAdd: { |
| 11741 | Type *Ty = RMW->getType(); |
| 11742 | |
| 11743 | // We don't have a way to support 16-bit atomics now, so just leave them |
| 11744 | // as-is. |
| 11745 | if (Ty->isHalfTy()) |
| 11746 | return AtomicExpansionKind::None; |
| 11747 | |
| 11748 | if (!Ty->isFloatTy()) |
| 11749 | return AtomicExpansionKind::CmpXChg; |
| 11750 | |
| 11751 | // TODO: Do have these for flat. Older targets also had them for buffers. |
| 11752 | unsigned AS = RMW->getPointerAddressSpace(); |
| 11753 | |
| 11754 | if (AS == AMDGPUAS::GLOBAL_ADDRESS && Subtarget->hasAtomicFaddInsts()) { |
| 11755 | if (!fpModeMatchesGlobalFPAtomicMode(RMW)) |
| 11756 | return AtomicExpansionKind::CmpXChg; |
| 11757 | |
| 11758 | return RMW->use_empty() ? AtomicExpansionKind::None : |
| 11759 | AtomicExpansionKind::CmpXChg; |
| 11760 | } |
| 11761 | |
    // DS FP atomics do respect the denormal mode, but the rounding mode is fixed
| 11763 | // to round-to-nearest-even. |
| 11764 | return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ? |
| 11765 | AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg; |
| 11766 | } |
| 11767 | default: |
| 11768 | break; |
| 11769 | } |
| 11770 | |
| 11771 | return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW); |
| 11772 | } |
| 11773 | |
| 11774 | const TargetRegisterClass * |
| 11775 | SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { |
| 11776 | const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false); |
| 11777 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
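  // A uniform i1 is really a wave mask, so it belongs in the scalar
  // condition register class whose width matches the wavefront size.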
| 11778 | if (RC == &AMDGPU::VReg_1RegClass && !isDivergent) |
| 11779 | return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass |
| 11780 | : &AMDGPU::SReg_32RegClass; |
| 11781 | if (!TRI->isSGPRClass(RC) && !isDivergent) |
| 11782 | return TRI->getEquivalentSGPRClass(RC); |
| 11783 | else if (TRI->isSGPRClass(RC) && isDivergent) |
| 11784 | return TRI->getEquivalentVGPRClass(RC); |
| 11785 | |
| 11786 | return RC; |
| 11787 | } |
| 11788 | |
| 11789 | // FIXME: This is a workaround for DivergenceAnalysis not understanding always |
| 11790 | // uniform values (as produced by the mask results of control flow intrinsics) |
| 11791 | // used outside of divergent blocks. The phi users need to also be treated as |
| 11792 | // always uniform. |
| 11793 | static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited, |
| 11794 | unsigned WaveSize) { |
  // FIXME: We assume we never cast the mask results of a control flow
| 11796 | // intrinsic. |
| 11797 | // Early exit if the type won't be consistent as a compile time hack. |
| 11798 | IntegerType *IT = dyn_cast<IntegerType>(V->getType()); |
| 11799 | if (!IT || IT->getBitWidth() != WaveSize) |
| 11800 | return false; |
| 11801 | |
| 11802 | if (!isa<Instruction>(V)) |
| 11803 | return false; |
| 11804 | if (!Visited.insert(V).second) |
| 11805 | return false; |
| 11806 | bool Result = false; |
| 11807 | for (auto U : V->users()) { |
| 11808 | if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) { |
| 11809 | if (V == U->getOperand(1)) { |
| 11810 | switch (Intrinsic->getIntrinsicID()) { |
| 11811 | default: |
| 11812 | Result = false; |
| 11813 | break; |
| 11814 | case Intrinsic::amdgcn_if_break: |
| 11815 | case Intrinsic::amdgcn_if: |
| 11816 | case Intrinsic::amdgcn_else: |
| 11817 | Result = true; |
| 11818 | break; |
| 11819 | } |
| 11820 | } |
| 11821 | if (V == U->getOperand(0)) { |
| 11822 | switch (Intrinsic->getIntrinsicID()) { |
| 11823 | default: |
| 11824 | Result = false; |
| 11825 | break; |
| 11826 | case Intrinsic::amdgcn_end_cf: |
| 11827 | case Intrinsic::amdgcn_loop: |
| 11828 | Result = true; |
| 11829 | break; |
| 11830 | } |
| 11831 | } |
| 11832 | } else { |
| 11833 | Result = hasCFUser(U, Visited, WaveSize); |
| 11834 | } |
| 11835 | if (Result) |
| 11836 | break; |
| 11837 | } |
| 11838 | return Result; |
| 11839 | } |
| 11840 | |
| 11841 | bool SITargetLowering::requiresUniformRegister(MachineFunction &MF, |
| 11842 | const Value *V) const { |
| 11843 | if (const CallInst *CI = dyn_cast<CallInst>(V)) { |
| 11844 | if (CI->isInlineAsm()) { |
| 11845 | // FIXME: This cannot give a correct answer. This should only trigger in |
| 11846 | // the case where inline asm returns mixed SGPR and VGPR results, used |
| 11847 | // outside the defining block. We don't have a specific result to |
| 11848 | // consider, so this assumes if any value is SGPR, the overall register |
| 11849 | // also needs to be SGPR. |
| 11850 | const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo(); |
| 11851 | TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints( |
| 11852 | MF.getDataLayout(), Subtarget->getRegisterInfo(), *CI); |
| 11853 | for (auto &TC : TargetConstraints) { |
| 11854 | if (TC.Type == InlineAsm::isOutput) { |
| 11855 | ComputeConstraintToUse(TC, SDValue()); |
| 11856 | unsigned AssignedReg; |
| 11857 | const TargetRegisterClass *RC; |
| 11858 | std::tie(AssignedReg, RC) = getRegForInlineAsmConstraint( |
| 11859 | SIRI, TC.ConstraintCode, TC.ConstraintVT); |
| 11860 | if (RC) { |
| 11861 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 11862 | if (AssignedReg != 0 && SIRI->isSGPRReg(MRI, AssignedReg)) |
| 11863 | return true; |
| 11864 | else if (SIRI->isSGPRClass(RC)) |
| 11865 | return true; |
| 11866 | } |
| 11867 | } |
| 11868 | } |
| 11869 | } |
| 11870 | } |
| 11871 | SmallPtrSet<const Value *, 16> Visited; |
| 11872 | return hasCFUser(V, Visited, Subtarget->getWavefrontSize()); |
| 11873 | } |
| 11874 | |
| 11875 | std::pair<int, MVT> |
| 11876 | SITargetLowering::getTypeLegalizationCost(const DataLayout &DL, |
| 11877 | Type *Ty) const { |
| 11878 | auto Cost = TargetLoweringBase::getTypeLegalizationCost(DL, Ty); |
| 11879 | auto Size = DL.getTypeSizeInBits(Ty); |
| 11880 | // Maximum load or store can handle 8 dwords for scalar and 4 for |
| 11881 | // vector ALU. Let's assume anything above 8 dwords is expensive |
| 11882 | // even if legal. |
| 11883 | if (Size <= 256) |
| 11884 | return Cost; |
| 11885 | |
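  // Charge one unit of cost per 256-bit chunk, rounding up.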
| 11886 | Cost.first = (Size + 255) / 256; |
| 11887 | return Cost; |
| 11888 | } |
| 11889 | |